diff --git a/go.mod b/go.mod
index 8377994d4..3899a70e1 100644
--- a/go.mod
+++ b/go.mod
@@ -97,6 +97,7 @@ require (
 	go.etcd.io/etcd/client/v3 v3.5.18
 	go.mongodb.org/mongo-driver v1.17.3
 	go.opencensus.io v0.24.0 // indirect
+	go.uber.org/zap v1.27.0
 	gocloud.dev v0.41.0
 	gocloud.dev/pubsub/natspubsub v0.41.0
 	gocloud.dev/pubsub/rabbitpubsub v0.41.0
@@ -356,7 +357,6 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
 	go.opentelemetry.io/otel/trace v1.35.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/term v0.32.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
diff --git a/replace_glog.sh b/replace_glog.sh
new file mode 100755
index 000000000..1a9d82092
--- /dev/null
+++ b/replace_glog.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Find all Go files containing glog calls
+files=$(grep -l "glog\." --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing glog calls"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Replace all glog function calls with log
+    sed -i '' 's/glog\./log\./g' "$file"
+done
+
+echo "Replacement complete!"
\ No newline at end of file
diff --git a/replace_glog_calls.sh b/replace_glog_calls.sh
new file mode 100755
index 000000000..5f73110ce
--- /dev/null
+++ b/replace_glog_calls.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Find all Go files containing glog.V calls
+files=$(grep -l "glog.V" --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing glog.V calls"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Replace glog.V(n).Info with log.V(n).Info for n=0-4
+    for level in {0..4}; do
+        # Replace Info calls
+        sed -i '' "s/glog.V($level).Info/log.V($level).Info/g" "$file"
+        # Replace Infof calls
+        sed -i '' "s/glog.V($level).Infof/log.V($level).Infof/g" "$file"
+    done
+done
+
+echo "Replacement complete!"
\ No newline at end of file
diff --git a/replace_imports.sh b/replace_imports.sh
new file mode 100755
index 000000000..50670775d
--- /dev/null
+++ b/replace_imports.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Find all Go files containing the old import path
+files=$(grep -l "github.com/seaweedfs/seaweedfs/weed/glog" --include="*.go" -r .)
+
+# Check if any files were found
+if [ -z "$files" ]; then
+    echo "No files found containing the old import path"
+    exit 0
+fi
+
+# Print the files that will be modified
+echo "The following files will be modified:"
+echo "$files"
+echo
+
+# Ask for confirmation
+read -p "Do you want to proceed with the replacement? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Operation cancelled"
+    exit 1
+fi
+
+# Make the replacements
+for file in $files; do
+    echo "Processing $file"
+    # Use sed to replace the import path
+    sed -i '' 's|github.com/seaweedfs/seaweedfs/weed/glog|github.com/seaweedfs/seaweedfs/weed/util/log|g' "$file"
+done
+
+echo "Replacement complete!"
\ No newline at end of file
diff --git a/swap_log_levels.sh b/swap_log_levels.sh
new file mode 100755
index 000000000..ae7002eb6
--- /dev/null
+++ b/swap_log_levels.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Find all Go files containing log.V calls
+files=$(grep -l "log.V" --include="*.go" -r .)
+
+if [ -z "$files" ]; then
+    echo "No files found containing log.V calls"
+    exit 0
+fi
+
+# Create a temporary file for sed operations
+temp_file=$(mktemp)
+
+# Process each file
+for file in $files; do
+    echo "Processing $file"
+
+    # First, replace log.V(-1) with a temporary placeholder
+    sed 's/log\.V(-1)/__TEMP_NEG_ONE__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace log.V(4) with log.V(-1)
+    sed 's/log\.V(4)/log.V(-1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace the temporary placeholder with log.V(4)
+    sed 's/__TEMP_NEG_ONE__/log.V(4)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Swap log.V(0) and log.V(3)
+    sed 's/log\.V(0)/__TEMP_ZERO__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/log\.V(3)/log.V(0)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/__TEMP_ZERO__/log.V(3)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Swap log.V(1) and log.V(2)
+    sed 's/log\.V(1)/__TEMP_ONE__/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/log\.V(2)/log.V(1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+    sed 's/__TEMP_ONE__/log.V(2)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+
+    # Replace any other log.V(n) with log.V(-1)
+    sed -E 's/log\.V\([5-9][0-9]*\)/log.V(-1)/g' "$file" > "$temp_file"
+    mv "$temp_file" "$file"
+done
+
+# Clean up
+rm -f "$temp_file"
+
+echo "Log level swapping completed!"
\ No newline at end of file
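Taken together, the four scripts above do the migration mechanically: replace_imports.sh rewrites the import path, replace_glog.sh and replace_glog_calls.sh rewrite the call prefix, and swap_log_levels.sh remaps the verbosity numbers. A minimal Go sketch of the old-to-new level mapping that the sed passes implement (the helper name mapLevel is hypothetical, for illustration only):

    // mapLevel mirrors swap_log_levels.sh: 0<->3, 1<->2, and 4 or
    // anything higher becomes -1, the most verbose trace tier.
    func mapLevel(oldGlogLevel int) int {
        switch oldGlogLevel {
        case 0:
            return 3
        case 1:
            return 2
        case 2:
            return 1
        case 3:
            return 0
        default: // 4 and above
            return -1
        }
    }

So in the new scheme a larger number apparently means a more important message, with -1 reserved for tracing. One portability caveat: sed -i '' is the BSD/macOS spelling of in-place editing; under GNU sed these scripts would need sed -i with no empty-suffix argument.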
diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go
index 52368f8cd..99b715095 100644
--- a/unmaintained/change_superblock/change_superblock.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -7,7 +7,7 @@ import (
 	"path"
 	"strconv"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -49,7 +49,7 @@ func main() {
 	}
 	datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
 	if err != nil {
-		glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
+		log.Fatalf("Open Volume Data File [ERROR]: %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()
@@ -57,7 +57,7 @@ func main() {
 
 	superBlock, err := super_block.ReadSuperBlock(datBackend)
 	if err != nil {
-		glog.Fatalf("cannot parse existing super block: %v", err)
+		log.Fatalf("cannot parse existing super block: %v", err)
 	}
 
 	fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
@@ -69,7 +69,7 @@ func main() {
 
 		replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)
 		if err != nil {
-			glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+			log.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
 		}
 
 		fmt.Printf("Changing replication to: %s\n", replica)
@@ -82,7 +82,7 @@ func main() {
 
 		ttl, err := needle.ReadTTL(*targetTTL)
 		if err != nil {
-			glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
+			log.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
 		}
 
 		fmt.Printf("Changing ttl to: %s\n", ttl)
@@ -96,7 +96,7 @@ func main() {
 
 	header := superBlock.Bytes()
 	if n, e := datBackend.WriteAt(header, 0); n == 0 || e != nil {
-		glog.Fatalf("cannot write super block: %v", e)
+		log.Fatalf("cannot write super block: %v", e)
 	}
 
 	fmt.Println("Change Applied.")
diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go
index e289fefe8..2269bc6b1 100644
--- a/unmaintained/diff_volume_servers/diff_volume_servers.go
+++ b/unmaintained/diff_volume_servers/diff_volume_servers.go
@@ -10,7 +10,7 @@ import (
 	"math"
 	"os"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/operation"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
@@ -49,7 +49,7 @@ func main() {
 	vid := uint32(*volumeId)
 	servers := pb.ServerAddresses(*serversStr).ToAddresses()
 	if len(servers) < 2 {
-		glog.Fatalf("You must specify more than 1 server\n")
+		log.Fatalf("You must specify more than 1 server\n")
 	}
 	var referenceServer pb.ServerAddress
 	var maxOffset int64
@@ -57,7 +57,7 @@ func main() {
 	for _, addr := range servers {
 		files, offset, err := getVolumeFiles(vid, addr)
 		if err != nil {
-			glog.Fatalf("Failed to copy idx from volume server %s\n", err)
+			log.Fatalf("Failed to copy idx from volume server %s\n", err)
 		}
 		allFiles[addr] = files
 		if offset > maxOffset {
@@ -103,7 +103,7 @@ func main() {
 			id, err = getNeedleFileId(vid, nid, addr)
 		}
 		if err != nil {
-			glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
+			log.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
 		}
 		fmt.Println(id, addr, diffMsg)
 	}
diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go
index 164b5b238..8c5bf6f94 100644
--- a/unmaintained/fix_dat/fix_dat.go
+++ b/unmaintained/fix_dat/fix_dat.go
@@ -8,7 +8,7 @@ import (
 	"path"
 	"strconv"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -45,26 +45,26 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Index %v", err)
+		log.Fatalf("Read Volume Index %v", err)
 	}
 	defer indexFile.Close()
 	datFileName := path.Join(*fixVolumePath, fileName+".dat")
 	datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Data %v", err)
+		log.Fatalf("Read Volume Data %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()
 	newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
 	if err != nil {
-		glog.Fatalf("Write New Volume Data %v", err)
+		log.Fatalf("Write New Volume Data %v", err)
 	}
 	defer newDatFile.Close()
 	superBlock, err := super_block.ReadSuperBlock(datBackend)
 	if err != nil {
-		glog.Fatalf("Read Volume Data superblock %v", err)
+		log.Fatalf("Read Volume Data superblock %v", err)
 	}
 	newDatFile.Write(superBlock.Bytes())
diff --git a/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go b/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go
index 2b63d5d59..8965d35ca 100644
--- a/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go
+++ b/unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go
@@ -3,7 +3,7 @@ package main
 import (
 	"flag"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -38,7 +38,7 @@ func main() {
 			return nil
 		}
 		name := event.EventNotification.NewEntry.Name
-		glog.V(0).Infof("=> %s ts:%+v", name, time.Unix(0, event.TsNs))
+		log.V(3).Infof("=> %s ts:%+v", name, time.Unix(0, event.TsNs))
 		id := name[4:]
 		if x, err := strconv.Atoi(id); err == nil {
 			if x != expected {
@@ -59,7 +59,7 @@ func startGenerateMetadata() {
 
 		for i := 0; i < *n; i++ {
 			name := fmt.Sprintf("file%d", i)
-			glog.V(0).Infof("write %s/%s", *dir, name)
+			log.V(3).Infof("write %s/%s", *dir, name)
 			if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
 				Directory: *dir,
 				Entry: &filer_pb.Entry{
diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
index cfac97432..c29536949 100644
--- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
+++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
@@ -6,7 +6,7 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -50,7 +50,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 		newFileName := filepath.Join(*volumePath, "dat_fixed")
 		newDatFile, err := os.Create(newFileName)
 		if err != nil {
-			glog.Fatalf("Write New Volume Data %v", err)
+			log.Fatalf("Write New Volume Data %v", err)
 		}
 		scanner.datBackend = backend.NewDiskFile(newDatFile)
 		scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
@@ -59,7 +59,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 
 	checksum := Checksum(n)
 	if scanner.hashes[checksum] {
-		glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+		log.V(3).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
 		return nil
 	}
 	scanner.hashes[checksum] = true
@@ -85,13 +85,13 @@ func main() {
 
 	if _, err := os.Stat(scanner.dir); err != nil {
 		if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
-			glog.Fatalf("could not create output dir : %s", err)
+			log.Fatalf("could not create output dir : %s", err)
 		}
 	}
 
 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
 }
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index a60e45760..53286ac5f 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/seaweedfs/seaweedfs/weed/util"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage"
 	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
 	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
@@ -34,7 +34,7 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
 
 func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
 	t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
-	glog.V(0).Infof("%d,%s%08x offset %d size %d(%s) cookie %08x appendedAt %v name %s",
+	log.V(3).Infof("%d,%s%08x offset %d size %d(%s) cookie %08x appendedAt %v name %s",
 		*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.Name)
 	return nil
 }
@@ -48,6 +48,6 @@ func main() {
 	scanner := &VolumeFileScanner4SeeDat{}
 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
 }
diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go
index 87f00ebb0..e6c22bc5d 100644
--- a/unmaintained/see_idx/see_idx.go
+++ b/unmaintained/see_idx/see_idx.go
@@ -9,7 +9,7 @@ import (
 
 	"github.com/seaweedfs/seaweedfs/weed/util"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
 	"github.com/seaweedfs/seaweedfs/weed/storage/types"
 	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
@@ -36,7 +36,7 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
+		log.Fatalf("Create Volume Index [ERROR] %s\n", err)
 	}
 	defer indexFile.Close()
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -73,7 +73,7 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh for { if isLocked { if err := lock.AttemptToLock(lock_manager.LiveLockTTL); err != nil { - glog.V(0).Infof("Lost lock %s: %v", key, err) + log.V(3).Infof("Lost lock %s: %v", key, err) isLocked = false } } else { @@ -82,7 +82,7 @@ func (lc *LockClient) StartLongLivedLock(key string, owner string, onLockOwnerCh } } if lockOwner != lock.LockOwner() && lock.LockOwner() != "" { - glog.V(0).Infof("Lock owner changed from %s to %s", lockOwner, lock.LockOwner()) + log.V(3).Infof("Lock owner changed from %s to %s", lockOwner, lock.LockOwner()) onLockOwnerChange(lock.LockOwner()) lockOwner = lock.LockOwner() } @@ -102,7 +102,7 @@ func (lock *LiveLock) retryUntilLocked(lockDuration time.Duration) { return lock.AttemptToLock(lockDuration) }, func(err error) (shouldContinue bool) { if err != nil { - glog.Warningf("create lock %s: %s", lock.key, err) + log.Warningf("create lock %s: %s", lock.key, err) } return lock.renewToken == "" }) diff --git a/weed/cluster/lock_manager/distributed_lock_manager.go b/weed/cluster/lock_manager/distributed_lock_manager.go index 7de78410f..8b61063a2 100644 --- a/weed/cluster/lock_manager/distributed_lock_manager.go +++ b/weed/cluster/lock_manager/distributed_lock_manager.go @@ -2,7 +2,7 @@ package lock_manager import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "time" ) @@ -56,7 +56,7 @@ func (dlm *DistributedLockManager) FindLockOwner(key string) (owner string, move } if movedTo != dlm.Host { servers := dlm.LockRing.GetSnapshot() - glog.V(0).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers) + log.V(3).Infof("lock %s not on current %s but on %s from %v", key, dlm.Host, movedTo, servers) return } owner, err = dlm.lockManager.GetLockOwner(key) diff --git a/weed/cluster/lock_manager/lock_manager.go b/weed/cluster/lock_manager/lock_manager.go index ebc9dfeaa..d454a3a56 100644 --- a/weed/cluster/lock_manager/lock_manager.go +++ b/weed/cluster/lock_manager/lock_manager.go @@ -3,7 +3,7 @@ package lock_manager import ( "fmt" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "sync" "time" ) @@ -38,19 +38,19 @@ func (lm *LockManager) Lock(path string, expiredAtNs int64, token string, owner lm.accessLock.Lock() defer lm.accessLock.Unlock() - glog.V(4).Infof("lock %s %v %v %v", path, time.Unix(0, expiredAtNs), token, owner) + log.V(-1).Infof("lock %s %v %v %v", path, time.Unix(0, expiredAtNs), token, owner) if oldValue, found := lm.locks[path]; found { if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < time.Now().UnixNano() { // lock is expired, set to a new lock if token != "" { - glog.V(4).Infof("lock expired key %s non-empty token %v owner %v ts %s", path, token, owner, time.Unix(0, oldValue.ExpiredAtNs)) + log.V(-1).Infof("lock expired key %s non-empty token %v owner %v ts %s", path, token, owner, time.Unix(0, oldValue.ExpiredAtNs)) err = LockErrorNonEmptyTokenOnExpiredLock return } else { // new lock renewToken = uuid.New().String() - glog.V(4).Infof("key %s new token %v owner %v", path, renewToken, owner) + log.V(-1).Infof("key %s new token %v owner %v", path, renewToken, owner) lm.locks[path] = 
&Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} return } @@ -60,30 +60,30 @@ func (lm *LockManager) Lock(path string, expiredAtNs int64, token string, owner if oldValue.Token == token { // token matches, renew the lock renewToken = uuid.New().String() - glog.V(4).Infof("key %s old token %v owner %v => %v owner %v", path, oldValue.Token, oldValue.Owner, renewToken, owner) + log.V(-1).Infof("key %s old token %v owner %v => %v owner %v", path, oldValue.Token, oldValue.Owner, renewToken, owner) lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} return } else { if token == "" { // new lock - glog.V(4).Infof("key %s locked by %v", path, oldValue.Owner) + log.V(-1).Infof("key %s locked by %v", path, oldValue.Owner) err = fmt.Errorf("lock already owned by %v", oldValue.Owner) return } - glog.V(4).Infof("key %s expected token %v owner %v received %v from %v", path, oldValue.Token, oldValue.Owner, token, owner) + log.V(-1).Infof("key %s expected token %v owner %v received %v from %v", path, oldValue.Token, oldValue.Owner, token, owner) err = fmt.Errorf("lock: token mismatch") return } } else { - glog.V(4).Infof("key %s no lock owner %v", path, owner) + log.V(-1).Infof("key %s no lock owner %v", path, owner) if token == "" { // new lock - glog.V(4).Infof("key %s new token %v owner %v", path, token, owner) + log.V(-1).Infof("key %s new token %v owner %v", path, token, owner) renewToken = uuid.New().String() lm.locks[path] = &Lock{Token: renewToken, ExpiredAtNs: expiredAtNs, Owner: owner} return } else { - glog.V(4).Infof("key %s non-empty token %v owner %v", path, token, owner) + log.V(-1).Infof("key %s non-empty token %v owner %v", path, token, owner) err = LockErrorNonEmptyTokenOnNewLock return } @@ -99,13 +99,13 @@ func (lm *LockManager) Unlock(path string, token string) (isUnlocked bool, err e if oldValue.ExpiredAtNs > 0 && oldValue.ExpiredAtNs < now.UnixNano() { // lock is expired, delete it isUnlocked = true - glog.V(4).Infof("key %s expired at %v", path, time.Unix(0, oldValue.ExpiredAtNs)) + log.V(-1).Infof("key %s expired at %v", path, time.Unix(0, oldValue.ExpiredAtNs)) delete(lm.locks, path) return } if oldValue.Token == token { isUnlocked = true - glog.V(4).Infof("key %s unlocked with %v", path, token) + log.V(-1).Infof("key %s unlocked with %v", path, token) delete(lm.locks, path) return } else { @@ -130,7 +130,7 @@ func (lm *LockManager) CleanUp() { continue } if now > value.ExpiredAtNs { - glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, value.ExpiredAtNs)) + log.V(-1).Infof("key %s expired at %v", key, time.Unix(0, value.ExpiredAtNs)) delete(lm.locks, key) } } @@ -148,12 +148,12 @@ func (lm *LockManager) SelectLocks(selectFn func(key string) bool) (locks []*Loc for key, lock := range lm.locks { if now > lock.ExpiredAtNs { - glog.V(4).Infof("key %s expired at %v", key, time.Unix(0, lock.ExpiredAtNs)) + log.V(-1).Infof("key %s expired at %v", key, time.Unix(0, lock.ExpiredAtNs)) delete(lm.locks, key) continue } if selectFn(key) { - glog.V(4).Infof("key %s selected and deleted", key) + log.V(-1).Infof("key %s selected and deleted", key) delete(lm.locks, key) lock.Key = key locks = append(locks, lock) diff --git a/weed/cluster/lock_manager/lock_ring.go b/weed/cluster/lock_manager/lock_ring.go index e7f60e6d2..bc392ce94 100644 --- a/weed/cluster/lock_manager/lock_ring.go +++ b/weed/cluster/lock_manager/lock_ring.go @@ -1,7 +1,7 @@ package lock_manager import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + 
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/util" "sort" @@ -40,11 +40,11 @@ func (r *LockRing) SetTakeSnapshotCallback(onTakeSnapshot func(snapshot []pb.Ser // AddServer adds a server to the ring // if the previous snapshot passed the snapshot interval, create a new snapshot func (r *LockRing) AddServer(server pb.ServerAddress) { - glog.V(0).Infof("add server %v", server) + log.V(3).Infof("add server %v", server) r.Lock() if _, found := r.candidateServers[server]; found { - glog.V(0).Infof("add server: already exists %v", server) + log.V(3).Infof("add server: already exists %v", server) r.Unlock() return } @@ -56,7 +56,7 @@ func (r *LockRing) AddServer(server pb.ServerAddress) { } func (r *LockRing) RemoveServer(server pb.ServerAddress) { - glog.V(0).Infof("remove server %v", server) + log.V(3).Infof("remove server %v", server) r.Lock() diff --git a/weed/cluster/master_client.go b/weed/cluster/master_client.go index bab2360fe..27cad52da 100644 --- a/weed/cluster/master_client.go +++ b/weed/cluster/master_client.go @@ -3,7 +3,7 @@ package cluster import ( "context" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "google.golang.org/grpc" @@ -17,7 +17,7 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp FilerGroup: filerGroup, }) - glog.V(0).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType) + log.V(3).Infof("the cluster has %d %s\n", len(resp.ClusterNodes), clientType) for _, node := range resp.ClusterNodes { existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{ NodeType: FilerType, @@ -28,7 +28,7 @@ func ListExistingPeerUpdates(master pb.ServerAddress, grpcDialOption grpc.DialOp } return err }); grpcErr != nil { - glog.V(0).Infof("connect to %s: %v", master, grpcErr) + log.V(3).Infof("connect to %s: %v", master, grpcErr) } return } diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 08db2ef3d..2076a6f26 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -17,7 +17,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/util" @@ -123,7 +123,7 @@ func runBenchmark(cmd *Command, args []string) bool { if *b.cpuprofile != "" { f, err := os.Create(*b.cpuprofile) if err != nil { - glog.Fatal(err) + log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() @@ -316,7 +316,7 @@ func readFiles(fileIdLineChan chan string, s *stat) { func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) { file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - glog.Fatalf("File to create file %s: %s\n", fileName, err) + log.Fatalf("File to create file %s: %s\n", fileName, err) } defer file.Close() @@ -335,7 +335,7 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b func readFileIds(fileName string, fileIdLineChan chan string) { file, err := os.Open(fileName) // For read access. 
if err != nil { - glog.Fatalf("File to read file %s: %s\n", fileName, err) + log.Fatalf("File to read file %s: %s\n", fileName, err) } defer file.Close() diff --git a/weed/command/compact.go b/weed/command/compact.go index 6f5f2307a..c3ff0a77c 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -1,7 +1,7 @@ package command import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/util" @@ -43,15 +43,15 @@ func runCompact(cmd *Command, args []string) bool { vid := needle.VolumeId(*compactVolumeId) v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0) if err != nil { - glog.Fatalf("Load Volume [ERROR] %s\n", err) + log.Fatalf("Load Volume [ERROR] %s\n", err) } if *compactMethod == 0 { if err = v.Compact(preallocate, 0); err != nil { - glog.Fatalf("Compact Volume [ERROR] %s\n", err) + log.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { if err = v.Compact2(preallocate, 0, nil); err != nil { - glog.Fatalf("Compact Volume [ERROR] %s\n", err) + log.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/command/export.go b/weed/command/export.go index e09d57056..0d38a775d 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -13,7 +13,7 @@ import ( "text/template" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" @@ -111,11 +111,11 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in vid := scanner.vid nv, ok := needleMap.Get(n.Id) - glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v", + log.V(0).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv) if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { - glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", + log.V(0).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) return nil } @@ -139,9 +139,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version)) } } - glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size) + log.V(1).Infof("This seems deleted %d size %d", n.Id, n.Size) } else { - glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size) + log.V(1).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size) } return nil } @@ -178,7 +178,7 @@ func runExport(cmd *Command, args []string) bool { outputFile = os.Stdout } else { if outputFile, err = os.Create(*output); err != nil { - glog.Fatalf("cannot open output tar %s: %s", *output, err) + log.Fatalf("cannot open output tar %s: %s", *output, err) } } defer outputFile.Close() @@ -201,7 +201,7 @@ func runExport(cmd *Command, args []string) bool { defer needleMap.Close() if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), 
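Across the command files the call surface stays exactly glog-shaped, which suggests util/log exports at least the following (an interface sketch inferred from the call sites in this diff, not from the package source; the signed level parameter is implied by log.V(-1)):

    // verboseLogger is what a log.V(n) call appears to return.
    type verboseLogger interface {
        Infof(format string, args ...any)
        Infoln(args ...any)
    }

    // migratedLogAPI lists the package-level functions this diff relies on.
    type migratedLogAPI interface {
        V(level int) verboseLogger
        Fatal(args ...any)
        Fatalf(format string, args ...any)
        Errorf(format string, args ...any)
        Warningf(format string, args ...any)
    }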
fileName+".idx")); err != nil { - glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) + log.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } volumeFileScanner := &VolumeFileScanner4Export{ @@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool { err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner) if err != nil && err != io.EOF { - glog.Errorf("Export Volume File [ERROR] %s\n", err) + log.Errorf("Export Volume File [ERROR] %s\n", err) } return true } diff --git a/weed/command/filer.go b/weed/command/filer.go index 05b1e88c7..c5e447dc3 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -14,7 +14,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -324,44 +324,44 @@ func (fo *FilerOptions) startFiler() { AllowedOrigins: strings.Split(*fo.allowedOrigins, ","), }) if nfs_err != nil { - glog.Fatalf("Filer startup error: %v", nfs_err) + log.Fatalf("Filer startup error: %v", nfs_err) } if *fo.publicPort != 0 { publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort) - glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress) + log.V(3).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress) publicListener, localPublicListener, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0) if e != nil { - glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e) + log.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e) } go func() { if e := http.Serve(publicListener, publicVolumeMux); e != nil { - glog.Fatalf("Volume server fail to serve public: %v", e) + log.Fatalf("Volume server fail to serve public: %v", e) } }() if localPublicListener != nil { go func() { if e := http.Serve(localPublicListener, publicVolumeMux); e != nil { - glog.Errorf("Volume server fail to serve public: %v", e) + log.Errorf("Volume server fail to serve public: %v", e) } }() } } - glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port) + log.V(3).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port) filerListener, filerLocalListener, e := util.NewIpAndLocalListeners( *fo.bindIp, *fo.port, time.Duration(10)*time.Second, ) if e != nil { - glog.Fatalf("Filer listener error: %v", e) + log.Fatalf("Filer listener error: %v", e) } // starting grpc server grpcPort := *fo.portGrpc grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*fo.bindIp, grpcPort, 0) if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) + log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) @@ -378,13 +378,13 @@ func (fo *FilerOptions) startFiler() { localSocket = fmt.Sprintf("/tmp/seaweedfs-filer-%d.sock", *fo.port) } if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) + log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) } go func() { // start on local unix socket filerSocketListener, err := 
net.Listen("unix", localSocket) if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) + log.Fatalf("Failed to listen on %s: %v", localSocket, err) } httpS.Serve(filerSocketListener) }() @@ -402,14 +402,14 @@ func (fo *FilerOptions) startFiler() { RefreshDuration: security.CredRefreshingInterval, } if fo.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil { - glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) + log.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) } caCertPool := x509.NewCertPool() if caCertFile != "" { caCertFile, err := os.ReadFile(caCertFile) if err != nil { - glog.Fatalf("error reading CA certificate: %v", err) + log.Fatalf("error reading CA certificate: %v", err) } caCertPool.AppendCertsFromPEM(caCertFile) } @@ -428,23 +428,23 @@ func (fo *FilerOptions) startFiler() { if filerLocalListener != nil { go func() { if err := httpS.ServeTLS(filerLocalListener, "", ""); err != nil { - glog.Errorf("Filer Fail to serve: %v", e) + log.Errorf("Filer Fail to serve: %v", e) } }() } if err := httpS.ServeTLS(filerListener, "", ""); err != nil { - glog.Fatalf("Filer Fail to serve: %v", e) + log.Fatalf("Filer Fail to serve: %v", e) } } else { if filerLocalListener != nil { go func() { if err := httpS.Serve(filerLocalListener); err != nil { - glog.Errorf("Filer Fail to serve: %v", e) + log.Errorf("Filer Fail to serve: %v", e) } }() } if err := httpS.Serve(filerListener); err != nil { - glog.Fatalf("Filer Fail to serve: %v", e) + log.Fatalf("Filer Fail to serve: %v", e) } } } diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go index 380540fd9..3a47fc0d8 100644 --- a/weed/command/filer_backup.go +++ b/weed/command/filer_backup.go @@ -3,7 +3,7 @@ package command import ( "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -78,7 +78,7 @@ func runFilerBackup(cmd *Command, args []string) bool { clientEpoch++ err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId, clientEpoch) if err != nil { - glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err) + log.Errorf("backup from %s: %v", *filerBackupOptions.filer, err) time.Sleep(1747 * time.Millisecond) } } @@ -118,14 +118,14 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti if timeAgo.Milliseconds() == 0 { lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId)) if err != nil { - glog.V(0).Infof("starting from %v", startFrom) + log.V(3).Infof("starting from %v", startFrom) } else { startFrom = time.Unix(0, lastOffsetTsNs) - glog.V(0).Infof("resuming from %v", startFrom) + log.V(3).Infof("resuming from %v", startFrom) } } else { startFrom = time.Now().Add(-timeAgo) - glog.V(0).Infof("start time is set to %v", startFrom) + log.V(3).Infof("start time is set to %v", startFrom) } // create filer sink @@ -146,7 +146,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti return nil } if errors.Is(err, http.ErrNotFound) { - glog.V(0).Infof("got 404 error, ignore it: %s", err.Error()) + log.V(3).Infof("got 404 error, ignore it: %s", err.Error()) return nil } return err @@ -156,7 +156,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti } processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 
diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go
index 380540fd9..3a47fc0d8 100644
--- a/weed/command/filer_backup.go
+++ b/weed/command/filer_backup.go
@@ -3,7 +3,7 @@ package command
 import (
 	"errors"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
@@ -78,7 +78,7 @@ func runFilerBackup(cmd *Command, args []string) bool {
 		clientEpoch++
 		err := doFilerBackup(grpcDialOption, &filerBackupOptions, clientId, clientEpoch)
 		if err != nil {
-			glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
+			log.Errorf("backup from %s: %v", *filerBackupOptions.filer, err)
 			time.Sleep(1747 * time.Millisecond)
 		}
 	}
@@ -118,14 +118,14 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 	if timeAgo.Milliseconds() == 0 {
 		lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId))
 		if err != nil {
-			glog.V(0).Infof("starting from %v", startFrom)
+			log.V(3).Infof("starting from %v", startFrom)
 		} else {
 			startFrom = time.Unix(0, lastOffsetTsNs)
-			glog.V(0).Infof("resuming from %v", startFrom)
+			log.V(3).Infof("resuming from %v", startFrom)
 		}
 	} else {
 		startFrom = time.Now().Add(-timeAgo)
-		glog.V(0).Infof("start time is set to %v", startFrom)
+		log.V(3).Infof("start time is set to %v", startFrom)
 	}
 
 	// create filer sink
@@ -146,7 +146,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 			return nil
 		}
 		if errors.Is(err, http.ErrNotFound) {
-			glog.V(0).Infof("got 404 error, ignore it: %s", err.Error())
+			log.V(3).Infof("got 404 error, ignore it: %s", err.Error())
 			return nil
 		}
 		return err
@@ -156,7 +156,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 	}
 
 	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
-		glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
+		log.V(3).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
 		return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
 	})
 
@@ -167,7 +167,7 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 				time.Sleep(time.Hour * 24)
 				key := util.Join(targetPath, now.Add(-1*time.Hour*24*time.Duration(*filerBackupOptions.retentionDays)).Format("2006-01-02"))
 				_ = dataSink.DeleteEntry(util.Join(targetPath, key), true, true, nil)
-				glog.V(0).Infof("incremental backup delete directory:%s", key)
+				log.V(3).Infof("incremental backup delete directory:%s", key)
 			}
 		}()
 	}
diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go
index e8c4680ba..09faea34a 100644
--- a/weed/command/filer_meta_backup.go
+++ b/weed/command/filer_meta_backup.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/spf13/viper"
 	"google.golang.org/grpc"
 	"reflect"
@@ -64,13 +64,13 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	v.SetConfigFile(*metaBackup.backupFilerConfig)
 
 	if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file
-		glog.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
+		log.Fatalf("Failed to load %s file: %v\nPlease use this command to generate the a %s.toml file\n"+
 			"    weed scaffold -config=%s -output=.\n\n\n", *metaBackup.backupFilerConfig, err, "backup_filer", "filer")
 	}
 
 	if err := metaBackup.initStore(v); err != nil {
-		glog.V(0).Infof("init backup filer store: %v", err)
+		log.V(3).Infof("init backup filer store: %v", err)
 		return true
 	}
 
@@ -81,13 +81,13 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	}
 
 	if *metaBackup.restart || missingPreviousBackup {
-		glog.V(0).Infof("traversing metadata tree...")
+		log.V(3).Infof("traversing metadata tree...")
 		startTime := time.Now()
 		if err := metaBackup.traverseMetadata(); err != nil {
-			glog.Errorf("traverse meta data: %v", err)
+			log.Errorf("traverse meta data: %v", err)
 			return true
 		}
-		glog.V(0).Infof("metadata copied up to %v", startTime)
+		log.V(3).Infof("metadata copied up to %v", startTime)
 		if err := metaBackup.setOffset(startTime); err != nil {
 			startTime = time.Now()
 		}
@@ -96,7 +96,7 @@ func runFilerMetaBackup(cmd *Command, args []string) bool {
 	for {
 		err := metaBackup.streamMetadataBackup()
 		if err != nil {
-			glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
+			log.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err)
 			time.Sleep(1747 * time.Millisecond)
 		}
 	}
@@ -111,9 +111,9 @@ func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error {
 		if v.GetBool(store.GetName() + ".enabled") {
 			store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore)
 			if err := store.Initialize(v, store.GetName()+"."); err != nil {
-				glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
+				log.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err)
 			}
-			glog.V(0).Infof("configured filer store to %s", store.GetName())
+			log.V(3).Infof("configured filer store to %s", store.GetName())
 			hasDefaultStoreConfigured = true
 			metaBackup.store = filer.NewFilerStoreWrapper(store)
 			break
@@ -155,7 +155,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
 	if err != nil {
 		startTime = time.Now()
 	}
-	glog.V(0).Infof("streaming from %v", startTime)
+	log.V(3).Infof("streaming from %v", startTime)
 
 	store := metaBackup.store
 
@@ -192,7 +192,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
 
 	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		lastTime := time.Unix(0, lastTsNs)
-		glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
+		log.V(3).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
 		return metaBackup.setOffset(lastTime)
 	})
diff --git a/weed/command/filer_remote_gateway.go b/weed/command/filer_remote_gateway.go
index 3e52e8d3f..2c907c518 100644
--- a/weed/command/filer_remote_gateway.go
+++ b/weed/command/filer_remote_gateway.go
@@ -3,7 +3,7 @@ package command
 import (
 	"context"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@@ -115,7 +115,7 @@ func runFilerRemoteGateway(cmd *Command, args []string) bool {
 		return remoteGatewayOptions.followBucketUpdatesAndUploadToRemote(filerSource)
 	}, func(err error) bool {
 		if err != nil {
-			glog.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
+			log.Errorf("synchronize %s: %v", remoteGatewayOptions.bucketsDir, err)
 		}
 		return true
 	})
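The progress lines flushed by pb.AddOffsetFunc are computed two different ways in this diff: filer_backup.go and filer_meta_backup.go divide the event counter by the nominal 3-second flush interval, while the remote-gateway code that follows divides by the measured elapsed time. Side by side (a fragment; counter, now, and lastLogTsNs come from the surrounding hunks):

    // filer_backup.go: assumes the callback fires exactly every 3 seconds
    rateNominal := float64(counter) / float64(3)
    // filer_remote_gateway_buckets.go: measures the real interval in seconds
    rateMeasured := float64(counter) / (float64(now-lastLogTsNs) / 1e9)

The measured form stays accurate when a flush is delayed, which may be why the gateway paths use it.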
diff --git a/weed/command/filer_remote_gateway_buckets.go b/weed/command/filer_remote_gateway_buckets.go
index f6fe9a99c..dc75116c5 100644
--- a/weed/command/filer_remote_gateway_buckets.go
+++ b/weed/command/filer_remote_gateway_buckets.go
@@ -3,7 +3,7 @@ package command
 import (
 	"fmt"
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@@ -43,7 +43,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
 			return nil
 		}
 		now := time.Now().UnixNano()
-		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
+		log.V(3).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
 		lastLogTsNs = now
 		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), option.bucketsDir, offsetTsNs)
 	})
@@ -78,12 +78,12 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 		}
 		if option.mappings.PrimaryBucketStorageName != "" && *option.createBucketAt == "" {
 			*option.createBucketAt = option.mappings.PrimaryBucketStorageName
-			glog.V(0).Infof("%s is set as the primary remote storage", *option.createBucketAt)
+			log.V(3).Infof("%s is set as the primary remote storage", *option.createBucketAt)
 		}
 		if len(option.mappings.Mappings) == 1 && *option.createBucketAt == "" {
 			for k := range option.mappings.Mappings {
 				*option.createBucketAt = k
-				glog.V(0).Infof("%s is set as the only remote storage", *option.createBucketAt)
+				log.V(3).Infof("%s is set as the only remote storage", *option.createBucketAt)
 			}
 		}
 		if *option.createBucketAt == "" {
@@ -132,7 +132,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 				bucketName = remoteLocation.Bucket
 			}
 
-			glog.V(0).Infof("create bucket %s", bucketName)
+			log.V(3).Infof("create bucket %s", bucketName)
 			if err := client.CreateBucket(bucketName); err != nil {
 				return fmt.Errorf("create bucket %s in %s: %v", bucketName, remoteConf.Name, err)
 			}
@@ -150,7 +150,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 				return fmt.Errorf("findRemoteStorageClient %s: %v", entry.Name, err)
 			}
 
-			glog.V(0).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
+			log.V(3).Infof("delete remote bucket %s", remoteStorageMountLocation.Bucket)
 			if err := client.DeleteBucket(remoteStorageMountLocation.Bucket); err != nil {
 				return fmt.Errorf("delete remote bucket %s: %v", remoteStorageMountLocation.Bucket, err)
 			}
@@ -219,17 +219,17 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			if err != nil {
 				return err
 			}
-			glog.V(2).Infof("create: %+v", resp)
+			log.V(1).Infof("create: %+v", resp)
 			if !shouldSendToRemote(message.NewEntry) {
-				glog.V(2).Infof("skipping creating: %+v", resp)
+				log.V(1).Infof("skipping creating: %+v", resp)
 				return nil
 			}
 			dest := toRemoteStorageLocation(bucket, util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
 			if message.NewEntry.IsDirectory {
-				glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
+				log.V(3).Infof("mkdir %s", remote_storage.FormatLocation(dest))
 				return client.WriteDirectory(dest, message.NewEntry)
 			}
-			glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
 			remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 			if writeErr != nil {
 				return writeErr
@@ -248,13 +248,13 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			if err != nil {
 				return err
 			}
-			glog.V(2).Infof("delete: %+v", resp)
+			log.V(1).Infof("delete: %+v", resp)
 			dest := toRemoteStorageLocation(bucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
 			if message.OldEntry.IsDirectory {
-				glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
+				log.V(3).Infof("rmdir %s", remote_storage.FormatLocation(dest))
 				return client.RemoveDirectory(dest)
 			}
-			glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("delete %s", remote_storage.FormatLocation(dest))
 			return client.DeleteFile(dest)
 		}
 		if message.OldEntry != nil && message.NewEntry != nil {
@@ -278,7 +278,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 			newBucket, newRemoteStorageMountLocation, newRemoteStorage, newOk := option.detectBucketInfo(message.NewParentPath)
 			if oldOk && newOk {
 				if !shouldSendToRemote(message.NewEntry) {
-					glog.V(2).Infof("skipping updating: %+v", resp)
+					log.V(1).Infof("skipping updating: %+v", resp)
 					return nil
 				}
 				client, err := remote_storage.GetRemoteStorage(oldRemoteStorage)
@@ -292,7 +292,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 					return nil
 				}
 				if message.OldEntry.RemoteEntry != nil && filer.IsSameData(message.OldEntry, message.NewEntry) {
-					glog.V(2).Infof("update meta: %+v", resp)
+					log.V(1).Infof("update meta: %+v", resp)
 					oldDest := toRemoteStorageLocation(oldBucket, util.NewFullPath(resp.Directory, message.OldEntry.Name), oldRemoteStorageMountLocation)
 					return client.UpdateFileMetadata(oldDest, message.OldEntry, message.NewEntry)
 				} else {
@@ -316,14 +316,14 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
 				if message.OldEntry.IsDirectory {
 					return client.RemoveDirectory(oldDest)
 				}
-				glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
+				log.V(3).Infof("delete %s", remote_storage.FormatLocation(oldDest))
 				if err := client.DeleteFile(oldDest); err != nil {
 					return err
 				}
 			}
 			if newOk {
 				if !shouldSendToRemote(message.NewEntry) {
-					glog.V(2).Infof("skipping updating: %+v", resp)
+					log.V(1).Infof("skipping updating: %+v", resp)
 					return nil
 				}
 				client, err := remote_storage.GetRemoteStorage(newRemoteStorage)
@@ -375,13 +375,13 @@ func (option *RemoteGatewayOptions) detectBucketInfo(actualDir string) (bucket u
 	var isMounted bool
 	remoteStorageMountLocation, isMounted = option.mappings.Mappings[string(bucket)]
 	if !isMounted {
-		glog.Warningf("%s is not mounted", bucket)
+		log.Warningf("%s is not mounted", bucket)
 		return "", nil, nil, false
 	}
 	var hasClient bool
 	remoteConf, hasClient = option.remoteConfs[remoteStorageMountLocation.Name]
 	if !hasClient {
-		glog.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
+		log.Warningf("%s mounted to un-configured %+v", bucket, remoteStorageMountLocation)
 		return "", nil, nil, false
 	}
 	return bucket, remoteStorageMountLocation, remoteConf, true
@@ -422,7 +422,7 @@ func (option *RemoteGatewayOptions) collectRemoteStorageConf() (err error) {
 	}, "", false, math.MaxUint32)
 
 	if option.mappings.PrimaryBucketStorageName == "" && len(option.remoteConfs) == 1 {
-		glog.V(0).Infof("%s is set to the default remote storage", lastConfName)
+		log.V(3).Infof("%s is set to the default remote storage", lastConfName)
 		option.mappings.PrimaryBucketStorageName = lastConfName
 	}
diff --git a/weed/command/filer_remote_sync.go b/weed/command/filer_remote_sync.go
index 77dd95134..3eb9f4aa1 100644
--- a/weed/command/filer_remote_sync.go
+++ b/weed/command/filer_remote_sync.go
@@ -2,7 +2,7 @@ package command
 
 import (
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/replication/source"
@@ -94,7 +94,7 @@ func runFilerRemoteSynchronize(cmd *Command, args []string) bool {
 		return followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)
 	}, func(err error) bool {
 		if err != nil {
-			glog.Errorf("synchronize %s: %v", dir, err)
+			log.Errorf("synchronize %s: %v", dir, err)
 		}
 		return true
 	})
diff --git a/weed/command/filer_remote_sync_dir.go b/weed/command/filer_remote_sync_dir.go
index 186523e45..f1bd34d93 100644
--- a/weed/command/filer_remote_sync_dir.go
+++ b/weed/command/filer_remote_sync_dir.go
@@ -9,7 +9,7 @@ import (
 	"time"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
@@ -57,7 +57,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
 		}
 		// use processor.processedTsWatermark instead of the lastTsNs from the most recent job
 		now := time.Now().UnixNano()
-		glog.V(0).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
+		log.V(3).Infof("remote sync %s progressed to %v %0.2f/sec", *option.filerAddress, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9))
 		lastLogTsNs = now
 		return remote_storage.SetSyncOffset(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir, offsetTsNs)
 	})
@@ -103,10 +103,10 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
 		}
 		if remoteLoc, found := mappings.Mappings[mountedDir]; found {
 			if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {
-				glog.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
+				log.Fatalf("Unexpected mount changes %+v => %+v", remoteStorageMountLocation, remoteLoc)
 			}
 		} else {
-			glog.V(0).Infof("unmounted %s exiting ...", mountedDir)
+			log.V(3).Infof("unmounted %s exiting ...", mountedDir)
 			os.Exit(0)
 		}
 	}
@@ -142,17 +142,17 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
 		if !filer.HasData(message.NewEntry) {
 			return nil
 		}
-		glog.V(2).Infof("create: %+v", resp)
+		log.V(1).Infof("create: %+v", resp)
 		if !shouldSendToRemote(message.NewEntry) {
-			glog.V(2).Infof("skipping creating: %+v", resp)
+			log.V(1).Infof("skipping creating: %+v", resp)
 			return nil
 		}
 		dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
 		if message.NewEntry.IsDirectory {
-			glog.V(0).Infof("mkdir %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("mkdir %s", remote_storage.FormatLocation(dest))
 			return client.WriteDirectory(dest, message.NewEntry)
 		}
-		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
+		log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
 		remoteEntry, writeErr := retriedWriteFile(client, filerSource, message.NewEntry, dest)
 		if writeErr != nil {
 			return writeErr
@@ -160,13 +160,13 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
 		return updateLocalEntry(option, message.NewParentPath, message.NewEntry, remoteEntry)
 	}
 	if filer_pb.IsDelete(resp) {
-		glog.V(2).Infof("delete: %+v", resp)
+		log.V(1).Infof("delete: %+v", resp)
 		dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
 		if message.OldEntry.IsDirectory {
-			glog.V(0).Infof("rmdir %s", remote_storage.FormatLocation(dest))
+			log.V(3).Infof("rmdir %s", remote_storage.FormatLocation(dest))
 			return client.RemoveDirectory(dest)
 		}
-		glog.V(0).Infof("delete %s", remote_storage.FormatLocation(dest))
+		log.V(3).Infof("delete %s", remote_storage.FormatLocation(dest))
 		return client.DeleteFile(dest)
 	}
 	if message.OldEntry != nil && message.NewEntry != nil {
@@ -176,7 +176,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
 		oldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)
 		dest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)
 		if !shouldSendToRemote(message.NewEntry) {
-			glog.V(2).Infof("skipping updating: %+v", resp)
+			log.V(1).Infof("skipping updating: %+v", resp)
 			return nil
 		}
 		if message.NewEntry.IsDirectory {
@@ -184,12 +184,12 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
 		}
 		if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {
 			if filer.IsSameData(message.OldEntry, message.NewEntry) {
-				glog.V(2).Infof("update meta: %+v", resp)
+				log.V(1).Infof("update meta: %+v", resp)
 				return client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)
 			}
 		}
-		glog.V(2).Infof("update: %+v", resp)
-		glog.V(0).Infof("delete %s", remote_storage.FormatLocation(oldDest))
+		log.V(1).Infof("update: %+v", resp)
+		log.V(3).Infof("delete %s", remote_storage.FormatLocation(oldDest))
 		if err := client.DeleteFile(oldDest); err != nil {
 			if isMultipartUploadFile(resp.Directory, message.OldEntry.Name) {
 				return nil
@@ -211,7 +211,7 @@ func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *so
 	var writeErr error
 	err = util.Retry("writeFile", func() error {
 		reader := filer.NewFileReader(filerSource, newEntry)
-		glog.V(0).Infof("create %s", remote_storage.FormatLocation(dest))
+		log.V(3).Infof("create %s", remote_storage.FormatLocation(dest))
 		remoteEntry, writeErr = client.WriteFile(dest, newEntry, reader)
 		if writeErr != nil {
 			return writeErr
@@ -219,7 +219,7 @@ func retriedWriteFile(client remote_storage.RemoteStorageClient, filerSource *so
 		return nil
 	})
 	if err != nil {
-		glog.Errorf("write to %s: %v", dest, err)
+		log.Errorf("write to %s: %v", dest, err)
 	}
 	return
 }
@@ -232,7 +232,7 @@ func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc
 	if timeAgo == 0 {
 		mountedDirEntry, err := filer_pb.GetEntry(filerClient, util.FullPath(mountedDir))
 		if err != nil {
-			glog.V(0).Infof("get mounted directory %s: %v", mountedDir, err)
+			log.V(3).Infof("get mounted directory %s: %v", mountedDir, err)
 			return time.Now()
 		}
 
@@ -240,7 +240,7 @@ func collectLastSyncOffset(filerClient filer_pb.FilerClient, grpcDialOption grpc
 		if mountedDirEntry != nil {
 			if err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs/1000000 {
 				lastOffsetTs = time.Unix(0, lastOffsetTsNs)
-				glog.V(0).Infof("resume from %v", lastOffsetTs)
+				log.V(3).Infof("resume from %v", lastOffsetTs)
 			} else {
 				lastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)
 			}
diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go
index f53fdfb48..2455af817 100644
--- a/weed/command/filer_replication.go
+++ b/weed/command/filer_replication.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/replication"
 	"github.com/seaweedfs/seaweedfs/weed/replication/sink"
 	"github.com/seaweedfs/seaweedfs/weed/replication/sub"
@@ -42,10 +42,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 	for _, input := range sub.NotificationInputs {
 		if config.GetBool("notification." + input.GetName() + ".enabled") {
 			if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
-				glog.Fatalf("Failed to initialize notification input for %s: %+v",
+				log.Fatalf("Failed to initialize notification input for %s: %+v",
 					input.GetName(), err)
 			}
-			glog.V(0).Infof("Configure notification input to %s", input.GetName())
+			log.V(3).Infof("Configure notification input to %s", input.GetName())
 			notificationInput = input
 			break
 		}
@@ -63,7 +63,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 			fromDir := config.GetString("source.filer.directory")
 			toDir := config.GetString("sink.filer.directory")
 			if strings.HasPrefix(toDir, fromDir) {
-				glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
+				log.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
 			}
 		}
 	}
@@ -83,7 +83,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 	for {
 		key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage()
 		if err != nil {
-			glog.Errorf("receive %s: %+v", key, err)
+			log.Errorf("receive %s: %+v", key, err)
 			if onFailureFn != nil {
 				onFailureFn()
 			}
@@ -97,19 +97,19 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 			continue
 		}
 		if m.OldEntry != nil && m.NewEntry == nil {
-			glog.V(1).Infof("delete: %s", key)
+			log.V(2).Infof("delete: %s", key)
 		} else if m.OldEntry == nil && m.NewEntry != nil {
-			glog.V(1).Infof("add: %s", key)
+			log.V(2).Infof("add: %s", key)
 		} else {
-			glog.V(1).Infof("modify: %s", key)
+			log.V(2).Infof("modify: %s", key)
 		}
 		if err = replicator.Replicate(context.Background(), key, m); err != nil {
-			glog.Errorf("replicate %s: %+v", key, err)
+			log.Errorf("replicate %s: %+v", key, err)
 			if onFailureFn != nil {
 				onFailureFn()
 			}
 		} else {
-			glog.V(1).Infof("replicated %s", key)
+			log.V(2).Infof("replicated %s", key)
 			if onSuccessFn != nil {
 				onSuccessFn()
 			}
@@ -123,10 +123,10 @@ func findSink(config *util.ViperProxy) sink.ReplicationSink {
 	for _, sk := range sink.Sinks {
 		if config.GetBool("sink." + sk.GetName() + ".enabled") {
 			if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
-				glog.Fatalf("Failed to initialize sink for %s: %+v",
+				log.Fatalf("Failed to initialize sink for %s: %+v",
 					sk.GetName(), err)
 			}
-			glog.V(0).Infof("Configure sink to %s", sk.GetName())
+			log.V(3).Infof("Configure sink to %s", sk.GetName())
 			dataSink = sk
 			break
 		}
@@ -141,7 +141,7 @@ func validateOneEnabledInput(config *util.ViperProxy) {
 		if enabledInput == "" {
 			enabledInput = input.GetName()
 		} else {
-			glog.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
+			log.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
 		}
 	}
 }
diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go
index 9b489297c..a72423616 100644
--- a/weed/command/filer_sync.go
+++ b/weed/command/filer_sync.go
@@ -10,7 +10,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/replication"
@@ -133,13 +133,13 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 			// read a filer signature
 			aFilerSignature, aFilerErr := replication.ReadFilerSignature(grpcDialOption, filerA)
 			if aFilerErr != nil {
-				glog.Errorf("get filer 'a' signature %d error from %s to %s: %v", aFilerSignature, *syncOptions.filerA, *syncOptions.filerB, aFilerErr)
+				log.Errorf("get filer 'a' signature %d error from %s to %s: %v", aFilerSignature, *syncOptions.filerA, *syncOptions.filerB, aFilerErr)
 				return true
 			}
 			// read b filer signature
 			bFilerSignature, bFilerErr := replication.ReadFilerSignature(grpcDialOption, filerB)
 			if bFilerErr != nil {
-				glog.Errorf("get filer 'b' signature %d error from %s to %s: %v", bFilerSignature, *syncOptions.filerA, *syncOptions.filerB, bFilerErr)
+				log.Errorf("get filer 'b' signature %d error from %s to %s: %v", bFilerSignature, *syncOptions.filerA, *syncOptions.filerB, bFilerErr)
 				return true
 			}
@@ -148,7 +148,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 				// set synchronization start timestamp to offset
 				initOffsetError := initOffsetFromTsMs(grpcDialOption, filerB, aFilerSignature, *syncOptions.bFromTsMs, getSignaturePrefixByPath(*syncOptions.aPath))
 				if initOffsetError != nil {
-					glog.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.bFromTsMs, *syncOptions.filerA, *syncOptions.filerB, initOffsetError)
+					log.Errorf("init offset from timestamp %d error from %s to %s: %v", *syncOptions.bFromTsMs, *syncOptions.filerA, *syncOptions.filerB, initOffsetError)
 					os.Exit(2)
 				}
 				for {
@@ -174,7 +174,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 						aFilerSignature,
 						bFilerSignature)
 					if err != nil {
-						glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
+						log.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
 						time.Sleep(1747 * time.Millisecond)
 					}
 				}
@@ -185,7 +185,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 			// set synchronization start timestamp to offset
 			initOffsetError := initOffsetFromTsMs(grpcDialOption, filerA, bFilerSignature, *syncOptions.aFromTsMs, getSignaturePrefixByPath(*syncOptions.bPath))
 			if initOffsetError != nil {
-				glog.Errorf("init offset from
timestamp %d error from %s to %s: %v", *syncOptions.aFromTsMs, *syncOptions.filerB, *syncOptions.filerA, initOffsetError) os.Exit(2) } go func() { @@ -212,7 +212,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool { bFilerSignature, aFilerSignature) if err != nil { - glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err) + log.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err) time.Sleep(2147 * time.Millisecond) } } @@ -236,7 +236,7 @@ func initOffsetFromTsMs(grpcDialOption grpc.DialOption, targetFiler pb.ServerAdd if setOffsetErr != nil { return setOffsetErr } - glog.Infof("setOffset from timestamp ms success! start offset: %d from %s to %s", fromTsNs, *syncOptions.filerA, *syncOptions.filerB) + log.Infof("setOffset from timestamp ms success! start offset: %d from %s to %s", fromTsNs, *syncOptions.filerA, *syncOptions.filerB) return nil } @@ -250,7 +250,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti return err } - glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs) + log.V(3).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs) // create filer sink filerSource := &source.FilerSource{} @@ -273,7 +273,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti } if concurrency < 0 || concurrency > 1024 { - glog.Warningf("invalid concurrency value, using default: %d", DefaultConcurrencyLimit) + log.Warningf("invalid concurrency value, using default: %d", DefaultConcurrencyLimit) concurrency = DefaultConcurrencyLimit } processor := NewMetadataProcessor(processEventFn, concurrency, sourceFilerOffsetTsNs) @@ -290,7 +290,7 @@ func doSubscribeFilerMetaChanges(clientId int32, clientEpoch int32, grpcDialOpti } // use processor.processedTsWatermark instead of the lastTsNs from the most recent job now := time.Now().UnixNano() - glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) + log.V(3).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, offsetTsNs), float64(counter)/(float64(now-lastLogTsNs)/1e9)) lastLogTsNs = now // collect synchronous offset statsCollect.FilerSyncOffsetGauge.WithLabelValues(sourceFiler.String(), targetFiler.String(), clientName, sourcePath).Set(float64(offsetTsNs)) @@ -397,7 +397,7 @@ func genProcessFunction(sourcePath string, targetPath string, excludePaths []str } if debug { - glog.V(0).Infof("received %v", resp) + log.V(3).Infof("received %v", resp) } if isMultipartUploadDir(resp.Directory + "/") { diff --git a/weed/command/filer_sync_jobs.go b/weed/command/filer_sync_jobs.go index d49031b98..a5e3c294f 100644 --- a/weed/command/filer_sync_jobs.go +++ b/weed/command/filer_sync_jobs.go @@ -1,7 +1,7 @@ package command import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -46,7 +46,7 @@ func (t *MetadataProcessor) AddSyncJob(resp *filer_pb.SubscribeMetadataResponse) if err := util.Retry("metadata processor", func() error { return t.fn(resp) }); err != nil { - 
glog.Errorf("process %v: %v", resp, err) + log.Errorf("process %v: %v", resp, err) } t.activeJobsLock.Lock() diff --git a/weed/command/fix.go b/weed/command/fix.go index 4fb4ed88e..b55024a53 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" @@ -53,7 +53,7 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { } func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - glog.V(2).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) + log.V(1).Infof("key %v offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) if n.Size.IsValid() { if pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size); pe != nil { return fmt.Errorf("saved %d with error %v", n.Size, pe) @@ -64,7 +64,7 @@ func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64 return fmt.Errorf("saved deleted %d with error %v", n.Size, pe) } } else { - glog.V(2).Infof("skipping deleted file ...") + log.V(1).Infof("skipping deleted file ...") return scanner.nm.Delete(n.Id) } } @@ -163,19 +163,19 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil { err := fmt.Errorf("scan .dat File: %v", err) if *fixIgnoreError { - glog.Error(err) + log.Error(err) } else { - glog.Fatal(err) + log.Fatal(err) } } if err := SaveToIdx(scanner, indexFileName); err != nil { err := fmt.Errorf("save to .idx File: %v", err) if *fixIgnoreError { - glog.Error(err) + log.Error(err) } else { os.Remove(indexFileName) - glog.Fatal(err) + log.Fatal(err) } } } diff --git a/weed/command/iam.go b/weed/command/iam.go index f4a7df2ca..6b6ecd770 100644 --- a/weed/command/iam.go +++ b/weed/command/iam.go @@ -8,7 +8,7 @@ import ( "time" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/iamapi" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -56,14 +56,14 @@ func (iamopt *IamOptions) startIamServer() bool { if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerAddress, err) } - glog.V(0).Infof("IAM read filer configuration: %s", resp) + log.V(3).Infof("IAM read filer configuration: %s", resp) return nil }) if err != nil { - glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress()) time.Sleep(time.Second) } else { - glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerAddress.ToGrpcAddress()) break } } @@ -76,9 +76,9 @@ func (iamopt *IamOptions) startIamServer() bool { Port: *iamopt.port, GrpcDialOption: grpcDialOption, }) - glog.V(0).Info("NewIamApiServer created") + log.V(3).Info("NewIamApiServer created") if iamApiServer_err != nil { - glog.Fatalf("IAM API 
Server startup error: %v", iamApiServer_err) + log.Fatalf("IAM API Server startup error: %v", iamApiServer_err) } httpS := &http.Server{Handler: router} @@ -86,19 +86,19 @@ func (iamopt *IamOptions) startIamServer() bool { listenAddress := fmt.Sprintf(":%d", *iamopt.port) iamApiListener, iamApiLocalListener, err := util.NewIpAndLocalListeners(*iamopt.ip, *iamopt.port, time.Duration(10)*time.Second) if err != nil { - glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) + log.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) } - glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) + log.V(3).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) if iamApiLocalListener != nil { go func() { if err = httpS.Serve(iamApiLocalListener); err != nil { - glog.Errorf("IAM API Server Fail to serve: %v", err) + log.Errorf("IAM API Server Fail to serve: %v", err) } }() } if err = httpS.Serve(iamApiListener); err != nil { - glog.Fatalf("IAM API Server Fail to serve: %v", err) + log.Fatalf("IAM API Server Fail to serve: %v", err) } return true diff --git a/weed/command/master.go b/weed/command/master.go index 4392c766f..49484fccb 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -22,7 +22,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -117,12 +117,12 @@ func runMaster(cmd *Command, args []string) bool { os.MkdirAll(*m.metaFolder, 0755) } if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil { - glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) + log.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } masterWhiteList := util.StringSplit(*m.whiteList, ",") if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { - glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") + log.Fatalf("volumeSizeLimitMB should be smaller than 30000") } switch { @@ -160,10 +160,10 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), masterPeers) listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) + log.V(3).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOption.ipBind, *masterOption.port, 0) if e != nil { - glog.Fatalf("Master startup error: %v", e) + log.Fatalf("Master startup error: %v", e) } // start raftServer @@ -183,12 +183,12 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { var err error if *masterOption.raftHashicorp { if raftServer, err = weed_server.NewHashicorpRaftServer(raftServerOption); err != nil { - glog.Fatalf("NewHashicorpRaftServer: %s", err) + log.Fatalf("NewHashicorpRaftServer: %s", err) } } else { raftServer, err = weed_server.NewRaftServer(raftServerOption) if raftServer == nil { - glog.Fatalf("please verify %s is writable, see https://github.com/seaweedfs/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) + log.Fatalf("please verify %s is writable, see 
https://github.com/seaweedfs/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) } } ms.SetRaftServer(raftServer) @@ -201,7 +201,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { grpcPort := *masterOption.portGrpc grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*masterOption.ipBind, grpcPort, 0) if err != nil { - glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) + log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) @@ -211,7 +211,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { protobuf.RegisterRaftServer(grpcS, raftServer) } reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) + log.V(3).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) if grpcLocalL != nil { go grpcS.Serve(grpcLocalL) } @@ -279,7 +279,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { } func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers string) (masterAddress pb.ServerAddress, cleanedPeers []pb.ServerAddress) { - glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers) + log.V(3).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers) masterAddress = pb.NewServerAddress(masterIp, masterPort, masterGrpcPort) cleanedPeers = pb.ServerAddresses(peers).ToAddresses() @@ -295,7 +295,7 @@ func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers strin cleanedPeers = append(cleanedPeers, masterAddress) } if len(cleanedPeers)%2 == 0 { - glog.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers) + log.Fatalf("Only odd number of masters are supported: %+v", cleanedPeers) } return } diff --git a/weed/command/master_follower.go b/weed/command/master_follower.go index 504ddb6c3..7f15090e8 100644 --- a/weed/command/master_follower.go +++ b/weed/command/master_follower.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -99,13 +99,13 @@ func startMasterFollower(masterOptions MasterOptions) { return nil }) if err != nil { - glog.V(0).Infof("failed to talk to filer %v: %v", masters, err) - glog.V(0).Infof("wait for %d seconds ...", i+1) + log.V(3).Infof("failed to talk to filer %v: %v", masters, err) + log.V(3).Infof("wait for %d seconds ...", i+1) time.Sleep(time.Duration(i+1) * time.Second) } } if err != nil { - glog.Errorf("failed to talk to filer %v: %v", masters, err) + log.Errorf("failed to talk to filer %v: %v", masters, err) return } @@ -119,22 +119,22 @@ func startMasterFollower(masterOptions MasterOptions) { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, option, masters) listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) + log.V(3).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOptions.ipBind, *masterOptions.port, 0) if e != nil { - glog.Fatalf("Master startup 
error: %v", e) + log.Fatalf("Master startup error: %v", e) } // starting grpc server grpcPort := *masterOptions.portGrpc grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*masterOptions.ipBind, grpcPort, 0) if err != nil { - glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) + log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort) + log.V(3).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort) if grpcLocalL != nil { go grpcS.Serve(grpcLocalL) } diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 1d1727519..0d5c645a6 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -3,7 +3,7 @@ package command import ( "bufio" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "io" "os" "strings" @@ -144,7 +144,7 @@ func checkMountPointAvailable(dir string) bool { if mounted, err := mounted(mountPoint); err != nil || mounted { if err != nil { - glog.Errorf("check %s: %v", mountPoint, err) + log.Errorf("check %s: %v", mountPoint, err) } return false } diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index f00daba8a..79cd8d632 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -16,7 +16,7 @@ import ( "time" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount" "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" "github.com/seaweedfs/seaweedfs/weed/mount/unmount" @@ -81,13 +81,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { return nil }) if err != nil { - glog.V(0).Infof("failed to talk to filer %v: %v", filerAddresses, err) - glog.V(0).Infof("wait for %d seconds ...", i+1) + log.V(3).Infof("failed to talk to filer %v: %v", filerAddresses, err) + log.V(3).Infof("wait for %d seconds ...", i+1) time.Sleep(time.Duration(i+1) * time.Second) } } if err != nil { - glog.Errorf("failed to talk to filer %v: %v", filerAddresses, err) + log.Errorf("failed to talk to filer %v: %v", filerAddresses, err) return true } @@ -111,11 +111,11 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { *option.localSocket = fmt.Sprintf("/tmp/seaweedfs-mount-%d.sock", mountDirHash) } if err := os.Remove(*option.localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", *option.localSocket, err.Error()) + log.Fatalf("Failed to remove %s, error: %s", *option.localSocket, err.Error()) } montSocketListener, err := net.Listen("unix", *option.localSocket) if err != nil { - glog.Fatalf("Failed to listen on %s: %v", *option.localSocket, err) + log.Fatalf("Failed to listen on %s: %v", *option.localSocket, err) } // detect mount folder mode @@ -158,7 +158,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { // Ensure target mount point availability if isValid := checkMountPointAvailable(dir); !isValid { - glog.Fatalf("Target mount point is not available: %s, please check!", dir) + log.Fatalf("Target mount point is not available: %s, please check!", dir) return true } @@ -262,7 +262,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { 
server, err := fuse.NewServer(seaweedFileSystem, dir, fuseMountOptions) if err != nil { - glog.Fatalf("Mount fail: %v", err) + log.Fatalf("Mount fail: %v", err) } grace.OnInterrupt(func() { unmount.Unmount(dir) @@ -279,8 +279,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool { return false } - glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir) - glog.V(0).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH) + log.V(3).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir) + log.V(3).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH) server.Serve() diff --git a/weed/command/mq_agent.go b/weed/command/mq_agent.go index ff4023e77..49f544766 100644 --- a/weed/command/mq_agent.go +++ b/weed/command/mq_agent.go @@ -5,7 +5,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" "google.golang.org/grpc/reflection" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/util" @@ -62,9 +62,9 @@ func (mqAgentOpt *MessageQueueAgentOptions) startQueueAgent() bool { // start grpc listener grpcL, _, err := util.NewIpAndLocalListeners(*mqAgentOpt.ip, *mqAgentOpt.port, 0) if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", *mqAgentOpt.port, err) + log.Fatalf("failed to listen on grpc port %d: %v", *mqAgentOpt.port, err) } - glog.Infof("Start Seaweed Message Queue Agent on %s:%d", *mqAgentOpt.ip, *mqAgentOpt.port) + log.Infof("Start Seaweed Message Queue Agent on %s:%d", *mqAgentOpt.ip, *mqAgentOpt.port) grpcS := pb.NewGrpcServer() mq_agent_pb.RegisterSeaweedMessagingAgentServer(grpcS, agentServer) reflection.Register(grpcS) diff --git a/weed/command/mq_broker.go b/weed/command/mq_broker.go index 5eb304204..e24da3705 100644 --- a/weed/command/mq_broker.go +++ b/weed/command/mq_broker.go @@ -5,7 +5,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util/grace" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/broker" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" @@ -79,13 +79,13 @@ func (mqBrokerOpt *MessageQueueBrokerOptions) startQueueServer() bool { Port: *mqBrokerOpt.port, }, grpcDialOption) if err != nil { - glog.Fatalf("failed to create new message broker for queue server: %v", err) + log.Fatalf("failed to create new message broker for queue server: %v", err) } // start grpc listener grpcL, _, err := util.NewIpAndLocalListeners("", *mqBrokerOpt.port, 0) if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", *mqBrokerOpt.port, err) + log.Fatalf("failed to listen on grpc port %d: %v", *mqBrokerOpt.port, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) mq_pb.RegisterSeaweedMessagingServer(grpcS, qs) diff --git a/weed/command/s3.go b/weed/command/s3.go index 4f513a5fa..36886b45e 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -25,7 +25,7 @@ import ( "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api" stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/util" @@ -212,14 +212,14 @@ func (s3opt *S3Options) startS3Server() bool { filerBucketsPath = resp.DirBuckets 
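Note on levels: every substitution in this patch follows one fixed table — glog.V(0) becomes log.V(3), glog.V(1) becomes log.V(2), glog.V(2) becomes log.V(1), and glog.V(4) becomes log.V(-1) — so on the new scale -1 is the most verbose level and 3 is always logged, the reverse of glog's ordering. A minimal Go sketch of that table, useful when auditing converted call sites; glogToLog is a hypothetical helper, not part of this patch:

package main

import "fmt"

// glogToLog mirrors the substitutions visible in this patch. glog's
// scale grows more verbose upward (V(4) is debug/trace); the new scale
// grows more verbose downward (V(-1) is debug/trace). Levels not
// exercised in this part of the diff are passed through unchanged.
func glogToLog(v int) int {
	switch v {
	case 0:
		return 3 // always-logged operational messages
	case 1:
		return 2
	case 2:
		return 1
	case 4:
		return -1 // most verbose: debug/trace
	default:
		return v
	}
}

func main() {
	for _, v := range []int{0, 1, 2, 4} {
		fmt.Printf("glog.V(%d) -> log.V(%d)\n", v, glogToLog(v))
	}
}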
filerGroup = resp.FilerGroup metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) - glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + log.V(3).Infof("S3 read filer buckets dir: %s", filerBucketsPath) return nil }) if err != nil { - glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress()) time.Sleep(time.Second) } else { - glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerAddress.ToGrpcAddress()) break } } @@ -246,7 +246,7 @@ func (s3opt *S3Options) startS3Server() bool { FilerGroup: filerGroup, }) if s3ApiServer_err != nil { - glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) + log.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) } httpS := &http.Server{Handler: router} @@ -264,13 +264,13 @@ func (s3opt *S3Options) startS3Server() bool { localSocket = fmt.Sprintf("/tmp/seaweedfs-s3-%d.sock", *s3opt.port) } if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) + log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) } go func() { // start on local unix socket s3SocketListener, err := net.Listen("unix", localSocket) if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) + log.Fatalf("Failed to listen on %s: %v", localSocket, err) } httpS.Serve(s3SocketListener) }() @@ -280,7 +280,7 @@ func (s3opt *S3Options) startS3Server() bool { s3ApiListener, s3ApiLocalListener, err := util.NewIpAndLocalListeners( *s3opt.bindIp, *s3opt.port, time.Duration(*s3opt.idleTimeout)*time.Second) if err != nil { - glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) + log.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) } if len(*s3opt.auditLogConfig) > 0 { @@ -294,7 +294,7 @@ func (s3opt *S3Options) startS3Server() bool { grpcPort := *s3opt.portGrpc grpcL, grpcLocalL, err := util.NewIpAndLocalListeners(*s3opt.bindIp, grpcPort, 0) if err != nil { - glog.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err) + log.Fatalf("s3 failed to listen on grpc port %d: %v", grpcPort, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.s3")) s3_pb.RegisterSeaweedS3Server(grpcS, s3ApiServer) @@ -311,7 +311,7 @@ func (s3opt *S3Options) startS3Server() bool { RefreshDuration: security.CredRefreshingInterval, } if s3opt.certProvider, err = pemfile.NewProvider(pemfileOptions); err != nil { - glog.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) + log.Fatalf("pemfile.NewProvider(%v) failed: %v", pemfileOptions, err) } caCertPool := x509.NewCertPool() @@ -319,7 +319,7 @@ func (s3opt *S3Options) startS3Server() bool { // load CA certificate file and add it to list of client CAs caCertFile, err := ioutil.ReadFile(*s3opt.tlsCACertificate) if err != nil { - glog.Fatalf("error reading CA certificate: %v", err) + log.Fatalf("error reading CA certificate: %v", err) } caCertPool.AppendCertsFromPEM(caCertFile) } @@ -336,49 +336,49 @@ func (s3opt *S3Options) startS3Server() bool { } err = security.FixTlsConfig(util.GetViper(), httpS.TLSConfig) if err != nil { - glog.Fatalf("error with tls config: %v", err) + log.Fatalf("error with tls config: %v", err) } if 
*s3opt.portHttps == 0 { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) + log.V(3).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) if s3ApiLocalListener != nil { go func() { if err = httpS.ServeTLS(s3ApiLocalListener, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } }() } if err = httpS.ServeTLS(s3ApiListener, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.portHttps) + log.V(3).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.portHttps) s3ApiListenerHttps, s3ApiLocalListenerHttps, _ := util.NewIpAndLocalListeners( *s3opt.bindIp, *s3opt.portHttps, time.Duration(*s3opt.idleTimeout)*time.Second) if s3ApiLocalListenerHttps != nil { go func() { if err = httpS.ServeTLS(s3ApiLocalListenerHttps, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } }() } go func() { if err = httpS.ServeTLS(s3ApiListenerHttps, "", ""); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } }() } } if *s3opt.tlsPrivateKey == "" || *s3opt.portHttps > 0 { - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) + log.V(3).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) if s3ApiLocalListener != nil { go func() { if err = httpS.Serve(s3ApiLocalListener); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } }() } if err = httpS.Serve(s3ApiListener); err != nil { - glog.Fatalf("S3 API Server Fail to serve: %v", err) + log.Fatalf("S3 API Server Fail to serve: %v", err) } } diff --git a/weed/command/server.go b/weed/command/server.go index dd3b0c8b4..aeacb2790 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -9,7 +9,7 @@ import ( stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util/grace" @@ -281,14 +281,14 @@ func runServer(cmd *Command, args []string) bool { folders := strings.Split(*volumeDataFolders, ",") if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { - glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000") + log.Fatalf("masterVolumeSizeLimitMB should be less than 30000") } if *masterOptions.metaFolder == "" { *masterOptions.metaFolder = folders[0] } if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil { - glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) + log.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) } filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder diff --git a/weed/command/sftp.go b/weed/command/sftp.go index 117f01d6e..1f8567331 100644 --- a/weed/command/sftp.go +++ b/weed/command/sftp.go @@ -8,7 +8,7 @@ import ( "runtime" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" 
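The listener wiring repeated across the S3, IAM, master, and SFTP servers follows one pattern: util.NewIpAndLocalListeners yields a primary listener plus an optional local listener, the secondary is served on a goroutine, and the primary blocks. A stdlib-only sketch of that shape; serveBoth and its parameters are illustrative stand-ins, not the SeaweedFS helpers:

package main

import (
	"log"
	"net"
	"net/http"
)

// serveBoth serves h on a required public listener and, when present,
// an optional local (loopback or unix-socket) listener. The secondary
// runs on a goroutine and its failure is only logged here; the blocking
// primary listener's error is returned to the caller.
func serveBoth(h http.Handler, public, local net.Listener) error {
	s := &http.Server{Handler: h}
	if local != nil {
		go func() {
			if err := s.Serve(local); err != nil && err != http.ErrServerClosed {
				log.Printf("local listener: %v", err)
			}
		}()
	}
	return s.Serve(public)
}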
"github.com/seaweedfs/seaweedfs/weed/pb" filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -110,14 +110,14 @@ func (sftpOpt *SftpOptions) startSftpServer() bool { } metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) filerGroup = resp.FilerGroup - glog.V(0).Infof("SFTP read filer configuration, using filer at: %s", filerAddress) + log.V(3).Infof("SFTP read filer configuration, using filer at: %s", filerAddress) return nil }) if err != nil { - glog.V(0).Infof("Waiting to connect to filer %s grpc address %s...", *sftpOpt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("Waiting to connect to filer %s grpc address %s...", *sftpOpt.filer, filerAddress.ToGrpcAddress()) time.Sleep(time.Second) } else { - glog.V(0).Infof("Connected to filer %s grpc address %s", *sftpOpt.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("Connected to filer %s grpc address %s", *sftpOpt.filer, filerAddress.ToGrpcAddress()) break } } @@ -154,16 +154,16 @@ func (sftpOpt *SftpOptions) startSftpServer() bool { localSocket = fmt.Sprintf("/tmp/seaweedfs-sftp-%d.sock", *sftpOpt.port) } if err := os.Remove(localSocket); err != nil && !os.IsNotExist(err) { - glog.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) + log.Fatalf("Failed to remove %s, error: %s", localSocket, err.Error()) } go func() { // start on local unix socket sftpSocketListener, err := net.Listen("unix", localSocket) if err != nil { - glog.Fatalf("Failed to listen on %s: %v", localSocket, err) + log.Fatalf("Failed to listen on %s: %v", localSocket, err) } if err := service.Serve(sftpSocketListener); err != nil { - glog.Fatalf("Failed to serve SFTP on socket %s: %v", localSocket, err) + log.Fatalf("Failed to serve SFTP on socket %s: %v", localSocket, err) } }() } @@ -172,21 +172,21 @@ func (sftpOpt *SftpOptions) startSftpServer() bool { listenAddress := fmt.Sprintf("%s:%d", *sftpOpt.bindIp, *sftpOpt.port) sftpListener, sftpLocalListener, err := util.NewIpAndLocalListeners(*sftpOpt.bindIp, *sftpOpt.port, time.Duration(10)*time.Second) if err != nil { - glog.Fatalf("SFTP server listener on %s error: %v", listenAddress, err) + log.Fatalf("SFTP server listener on %s error: %v", listenAddress, err) } - glog.V(0).Infof("Start Seaweed SFTP Server %s at %s", util.Version(), listenAddress) + log.V(3).Infof("Start Seaweed SFTP Server %s at %s", util.Version(), listenAddress) if sftpLocalListener != nil { go func() { if err := service.Serve(sftpLocalListener); err != nil { - glog.Fatalf("SFTP Server failed to serve on local listener: %v", err) + log.Fatalf("SFTP Server failed to serve on local listener: %v", err) } }() } if err := service.Serve(sftpListener); err != nil { - glog.Fatalf("SFTP Server failed to serve: %v", err) + log.Fatalf("SFTP Server failed to serve: %v", err) } return true diff --git a/weed/command/update.go b/weed/command/update.go index bf871d654..b611b14e5 100644 --- a/weed/command/update.go +++ b/weed/command/update.go @@ -18,7 +18,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "golang.org/x/net/context/ctxhttp" @@ -86,7 +86,7 @@ func runUpdate(cmd *Command, args []string) bool { if *updateOpt.dir != "" { if err := util.TestFolderWritable(util.ResolvePath(*updateOpt.dir)); err != nil { - glog.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, 
err) + log.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, err) return false } } else { @@ -101,16 +101,16 @@ func runUpdate(cmd *Command, args []string) bool { if runtime.GOOS == "windows" { if target == path { - glog.Fatalf("On windows, name of the new weed shouldn't be same to the original name.") + log.Fatalf("On windows, name of the new weed shouldn't be same to the original name.") return false } } - glog.V(0).Infof("new weed will be saved to %s", target) + log.V(3).Infof("new weed will be saved to %s", target) _, err := downloadRelease(context.Background(), target, *updateOpt.Version) if err != nil { - glog.Errorf("unable to download weed: %v", err) + log.Errorf("unable to download weed: %v", err) return false } return true @@ -125,14 +125,14 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st if rel.Version == currentVersion { if ver == "0" { - glog.V(0).Infof("weed is up to date") + log.V(3).Infof("weed is up to date") } else { - glog.V(0).Infof("no need to download the same version of weed ") + log.V(3).Infof("no need to download the same version of weed ") } return currentVersion, nil } - glog.V(0).Infof("download version: %s", rel.Version) + log.V(3).Infof("download version: %s", rel.Version) largeDiskSuffix := "" if util.VolumeSizeLimitGB == 8000 { @@ -165,7 +165,7 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st md5Ctx.Write(buf) binaryMd5 := md5Ctx.Sum(nil) if hex.EncodeToString(binaryMd5) != string(md5Val[0:32]) { - glog.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32])) + log.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32])) err = fmt.Errorf("binary md5sum doesn't match") return "", err } @@ -174,7 +174,7 @@ func downloadRelease(ctx context.Context, target string, ver string) (version st if err != nil { return "", err } else { - glog.V(0).Infof("successfully updated weed to version %v\n", rel.Version) + log.V(3).Infof("successfully updated weed to version %v\n", rel.Version) } return rel.Version, nil @@ -228,7 +228,7 @@ func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (R } if ver == "0" { release = releaseList[0] - glog.V(0).Infof("latest version is %v\n", release.TagName) + log.V(3).Infof("latest version is %v\n", release.TagName) } else { for _, r := range releaseList { if r.TagName == ver { @@ -287,7 +287,7 @@ func getGithubDataFile(ctx context.Context, assets []Asset, suffix string) (file return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix) } - glog.V(0).Infof("download %v\n", filename) + log.V(3).Infof("download %v\n", filename) data, err = getGithubData(ctx, url) if err != nil { return "", nil, err @@ -310,9 +310,9 @@ func extractToFile(buf []byte, filename, target string) error { hdr, terr := trd.Next() if terr != nil { if hdr != nil { - glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr) + log.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr) } else { - glog.Errorf("uncompress file is nil, failed:%s", terr) + log.Errorf("uncompress file is nil, failed:%s", terr) } return terr @@ -371,6 +371,6 @@ func extractToFile(buf []byte, filename, target string) error { return err } - glog.V(0).Infof("saved %d bytes in %v\n", n, target) + log.V(3).Infof("saved %d bytes in %v\n", n, target) return os.Chmod(target, mode) } diff --git a/weed/command/volume.go b/weed/command/volume.go index 2389d5561..d298447d1 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -24,7 
+24,7 @@ import ( "google.golang.org/grpc/reflection" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" weed_server "github.com/seaweedfs/seaweedfs/weed/server" stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" @@ -156,7 +156,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v v.folders = strings.Split(volumeFolders, ",") for _, folder := range v.folders { if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil { - glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + log.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) } } @@ -166,7 +166,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if max, e := strconv.ParseInt(maxString, 10, 64); e == nil { v.folderMaxLimits = append(v.folderMaxLimits, int32(max)) } else { - glog.Fatalf("The max specified in -max not a valid number %s", maxString) + log.Fatalf("The max specified in -max not a valid number %s", maxString) } } if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 { @@ -175,7 +175,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } if len(v.folders) != len(v.folderMaxLimits) { - glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits)) + log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits)) } if len(minFreeSpaces) == 1 && len(v.folders) > 1 { @@ -184,7 +184,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } if len(v.folders) != len(minFreeSpaces) { - glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces)) + log.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces)) } // set disk types @@ -199,7 +199,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } if len(v.folders) != len(diskTypes) { - glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes)) + log.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes)) } // security related white list configuration @@ -207,7 +207,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if *v.ip == "" { *v.ip = util.DetectedHostAddress() - glog.V(0).Infof("detected volume server ip address: %v", *v.ip) + log.V(3).Infof("detected volume server ip address: %v", *v.ip) } if *v.bindIp == "" { *v.bindIp = *v.ip @@ -272,7 +272,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if v.isSeparatedPublicPort() { publicHttpDown = v.startPublicHttpService(publicVolumeMux) if nil == publicHttpDown { - glog.Fatalf("start public http service failed") + log.Fatalf("start public http service failed") } } @@ -289,7 +289,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v // Stop heartbeats if !volumeServer.StopHeartbeat() { volumeServer.SetStopping() - glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds) + log.V(3).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds) time.Sleep(time.Duration(*v.preStopSeconds) * time.Second) } @@ -307,18 +307,18 @@ 
func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server, // firstly, stop the public http service to prevent from receiving new user request if nil != publicHttpDown { - glog.V(0).Infof("stop public http server ... ") + log.V(3).Infof("stop public http server ... ") if err := publicHttpDown.Stop(); err != nil { - glog.Warningf("stop the public http server failed, %v", err) + log.Warningf("stop the public http server failed, %v", err) } } - glog.V(0).Infof("graceful stop cluster http server ... ") + log.V(3).Infof("graceful stop cluster http server ... ") if err := clusterHttpServer.Stop(); err != nil { - glog.Warningf("stop the cluster http server failed, %v", err) + log.Warningf("stop the cluster http server failed, %v", err) } - glog.V(0).Infof("graceful stop gRPC ...") + log.V(3).Infof("graceful stop gRPC ...") grpcS.GracefulStop() volumeServer.Shutdown() @@ -336,14 +336,14 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe grpcPort := *v.portGrpc grpcL, err := util.NewListener(util.JoinHostPort(*v.bindIp, grpcPort), 0) if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) + log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) go func() { if err := grpcS.Serve(grpcL); err != nil { - glog.Fatalf("start gRPC service failed, %s", err) + log.Fatalf("start gRPC service failed, %s", err) } }() return grpcS @@ -351,17 +351,17 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort) - glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress) + log.V(3).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress) publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { - glog.Fatalf("Volume server listener error:%v", e) + log.Fatalf("Volume server listener error:%v", e) } pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute} publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener) go func() { if err := publicHttpDown.Wait(); err != nil { - glog.Errorf("public http down wait failed, %v", err) + log.Errorf("public http down wait failed, %v", err) } }() @@ -378,10 +378,10 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd } listeningAddress := util.JoinHostPort(*v.bindIp, *v.port) - glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress) + log.V(3).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress) listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { - glog.Fatalf("Volume server listener error:%v", e) + log.Fatalf("Volume server listener error:%v", e) } httpDown := httpdown.HTTP{ @@ -399,7 +399,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd clusterHttpServer := httpDown.Serve(httpS, listener) go func() { if e := clusterHttpServer.Wait(); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) + log.Fatalf("Volume server fail to serve: %v", e) } }() 
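The shutdown function above encodes a deliberate ordering: stop the public HTTP service first so no new user requests arrive, then the cluster HTTP server, then gRPC, and the volume store last. A stdlib-flavored sketch of the same ordering, with http.Server and grpc.Server standing in for the httpdown wrappers used in this patch:

package main

import (
	"context"
	"net/http"
	"time"

	"google.golang.org/grpc"
)

// gracefulStop drains ingress before touching the data plane: public
// requests first, intra-cluster HTTP second, in-flight gRPC calls
// third, and only then the storage layer itself.
func gracefulStop(public, cluster *http.Server, g *grpc.Server, stopStore func()) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if public != nil { // a separated public port is optional
		_ = public.Shutdown(ctx)
	}
	_ = cluster.Shutdown(ctx)
	g.GracefulStop() // blocks until outstanding RPCs complete
	stopStore()
}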
return clusterHttpServer diff --git a/weed/command/volume_test.go b/weed/command/volume_test.go index 801041a88..7693620a1 100644 --- a/weed/command/volume_test.go +++ b/weed/command/volume_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func TestXYZ(t *testing.T) { - glog.V(0).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat)) + log.V(3).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat)) } diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 02798b9b6..13cc40cf4 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -9,7 +9,7 @@ import ( "strconv" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -65,7 +65,7 @@ func runWebDav(cmd *Command, args []string) bool { util.LoadSecurityConfiguration() listenAddress := fmt.Sprintf("%s:%d", *webDavStandaloneOptions.ipBind, *webDavStandaloneOptions.port) - glog.V(0).Infof("Starting Seaweed WebDav Server %s at %s", util.Version(), listenAddress) + log.V(3).Infof("Starting Seaweed WebDav Server %s at %s", util.Version(), listenAddress) return webDavStandaloneOptions.startWebDav() @@ -101,10 +101,10 @@ func (wo *WebDavOption) startWebDav() bool { return nil }) if err != nil { - glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress()) time.Sleep(time.Second) } else { - glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress()) + log.V(3).Infof("connected to filer %s grpc address %s", *wo.filer, filerAddress.ToGrpcAddress()) break } } @@ -124,7 +124,7 @@ func (wo *WebDavOption) startWebDav() bool { MaxMB: *wo.maxMB, }) if webdavServer_err != nil { - glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) + log.Fatalf("WebDav Server startup error: %v", webdavServer_err) } httpS := &http.Server{Handler: ws.Handler} @@ -132,18 +132,18 @@ func (wo *WebDavOption) startWebDav() bool { listenAddress := fmt.Sprintf("%s:%d", *wo.ipBind, *wo.port) webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) if err != nil { - glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err) + log.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err) } if *wo.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed WebDav Server %s at https %s", util.Version(), listenAddress) + log.V(3).Infof("Start Seaweed WebDav Server %s at https %s", util.Version(), listenAddress) if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { - glog.Fatalf("WebDav Server Fail to serve: %v", err) + log.Fatalf("WebDav Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed WebDav Server %s at http %s", util.Version(), listenAddress) + log.V(3).Infof("Start Seaweed WebDav Server %s at http %s", util.Version(), listenAddress) if err = httpS.Serve(webDavListener); err != nil { - glog.Fatalf("WebDav Server Fail to serve: %v", err) + log.Fatalf("WebDav Server Fail to serve: %v", err) } } diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go index 
1d175651d..b2a74fb74 100644 --- a/weed/filer/abstract_sql/abstract_sql_store.go +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -5,7 +5,7 @@ import ( "database/sql" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3bucket" "github.com/seaweedfs/seaweedfs/weed/util" @@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") { // now the insert failed possibly due to duplication constraints sqlInsert = "falls back to update" - glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err) + log.V(2).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err) res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) } if err != nil { @@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat } } - glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) + log.V(-1).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath))) res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath)) if err != nil { return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) @@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, var name string var data []byte if err = rows.Scan(&name, &data); err != nil { - glog.V(0).Infof("scan %s : %v", dirPath, err) + log.V(3).Infof("scan %s : %v", dirPath, err) return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err) } lastFileName = name @@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, FullPath: util.NewFullPath(string(dirPath), name), } if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { - glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) + log.V(3).Infof("scan decode %s : %v", entry.FullPath, err) return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) } diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go index 221902aaa..ef746feb9 100644 --- a/weed/filer/abstract_sql/abstract_sql_store_kv.go +++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go @@ -6,7 +6,7 @@ import ( "encoding/base64" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "strings" ) @@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by } // now the insert failed possibly due to duplication constraints - glog.V(1).Infof("kv insert falls back to update: %s", err) + log.V(2).Infof("kv insert falls back to update: %s", err) res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr) if err != nil { diff --git a/weed/filer/arangodb/arangodb_store.go b/weed/filer/arangodb/arangodb_store.go index 457b5f28b..d1b77d36f 100644 --- a/weed/filer/arangodb/arangodb_store.go +++ b/weed/filer/arangodb/arangodb_store.go @@ -12,7 +12,7 @@ import ( 
"github.com/arangodb/go-driver" "github.com/arangodb/go-driver/http" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat if driver.IsNotFound(err) { return nil, filer_pb.ErrNotFound } - glog.Errorf("find %s: %v", fullpath, err) + log.Errorf("find %s: %v", fullpath, err) return nil, filer_pb.ErrNotFound } if len(data.Meta) == 0 { @@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP } _, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath))) if err != nil && !driver.IsNotFound(err) { - glog.Errorf("find %s: %v", fullpath, err) + log.Errorf("find %s: %v", fullpath, err) return fmt.Errorf("delete %s : %v", fullpath, err) } return nil @@ -331,7 +331,7 @@ sort d.name asc converted := arrayToBytes(data.Meta) if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } diff --git a/weed/filer/arangodb/arangodb_store_bucket.go b/weed/filer/arangodb/arangodb_store_bucket.go index 44aeeadea..1beb3dd7c 100644 --- a/weed/filer/arangodb/arangodb_store_bucket.go +++ b/weed/filer/arangodb/arangodb_store_bucket.go @@ -7,7 +7,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var _ filer.BucketAware = (*ArangodbStore)(nil) @@ -18,7 +18,7 @@ func (store *ArangodbStore) OnBucketCreation(bucket string) { // create the collection && add to cache _, err := store.ensureBucket(timeout, bucket) if err != nil { - glog.Errorf("bucket create %s: %v", bucket, err) + log.Errorf("bucket create %s: %v", bucket, err) } } func (store *ArangodbStore) OnBucketDeletion(bucket string) { @@ -26,12 +26,12 @@ func (store *ArangodbStore) OnBucketDeletion(bucket string) { defer cancel() collection, err := store.ensureBucket(timeout, bucket) if err != nil { - glog.Errorf("bucket delete %s: %v", bucket, err) + log.Errorf("bucket delete %s: %v", bucket, err) return } err = collection.Remove(timeout) if err != nil && !driver.IsNotFound(err) { - glog.Errorf("bucket delete %s: %v", bucket, err) + log.Errorf("bucket delete %s: %v", bucket, err) return } store.mu.Lock() diff --git a/weed/filer/arangodb/arangodb_store_kv.go b/weed/filer/arangodb/arangodb_store_kv.go index 2ca85ccce..4b0a29e01 100644 --- a/weed/filer/arangodb/arangodb_store_kv.go +++ b/weed/filer/arangodb/arangodb_store_kv.go @@ -6,7 +6,7 @@ import ( "github.com/arangodb/go-driver" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { @@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte return nil, filer.ErrKvNotFound } if err != nil { - glog.Errorf("kv get: %s %v", string(key), err) + log.Errorf("kv get: %s %v", string(key), err) return nil, filer.ErrKvNotFound } return arrayToBytes(model.Meta), nil @@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte func (store *ArangodbStore) KvDelete(ctx 
context.Context, key []byte) (err error) { _, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key))) if err != nil { - glog.Errorf("kv del: %v", err) + log.Errorf("kv del: %v", err) return filer.ErrKvNotFound } return nil diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go index 418812a47..84990acfc 100644 --- a/weed/filer/cassandra/cassandra_store.go +++ b/weed/filer/cassandra/cassandra_store.go @@ -8,7 +8,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -51,7 +51,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam } store.cluster.Keyspace = keyspace store.cluster.Timeout = time.Duration(timeout) * time.Millisecond - glog.V(0).Infof("timeout = %d", timeout) + log.V(3).Infof("timeout = %d", timeout) fallback := gocql.RoundRobinHostPolicy() if localDC != "" { fallback = gocql.DCAwareRoundRobinPolicy(localDC) @@ -61,7 +61,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam store.session, err = store.cluster.CreateSession() if err != nil { - glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) + log.V(3).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) } // set directory hash @@ -72,7 +72,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam dirHash := util.Md5String([]byte(dir))[:4] store.superLargeDirectoryHash[dir] = dirHash if existingDir, found := existingHash[dirHash]; found { - glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + log.Fatalf("directory %s has the same hash as %s", dir, existingDir) } existingHash[dirHash] = dir } @@ -202,7 +202,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { @@ -210,7 +210,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u } } if err = iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) + log.V(3).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/cassandra2/cassandra_store.go b/weed/filer/cassandra2/cassandra_store.go index d0578669b..fa0015365 100644 --- a/weed/filer/cassandra2/cassandra_store.go +++ b/weed/filer/cassandra2/cassandra_store.go @@ -8,7 +8,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -51,7 +51,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna } store.cluster.Keyspace = keyspace store.cluster.Timeout = time.Duration(timeout) * time.Millisecond - glog.V(0).Infof("timeout = %d", timeout) + log.V(3).Infof("timeout = %d", timeout) fallback := gocql.RoundRobinHostPolicy() if localDC != "" { fallback = gocql.DCAwareRoundRobinPolicy(localDC) @@ -61,7 +61,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna 
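
Aside: the hunks throughout this diff apply one fixed verbosity remapping rather than a plain rename. The call sites show glog.V(0) becoming log.V(3), glog.V(2) becoming log.V(1), glog.V(3) becoming log.V(0), and glog.V(4) becoming log.V(-1), so under the new util/log package a larger level appears to mean a more important message. The sketch below only documents that observed mapping; glogToLog is a hypothetical helper, not part of the patch, and the 1-to-2 case is inferred by symmetry rather than shown in these hunks.

package main

import "fmt"

// glogToLog documents the verbosity remapping observed in this diff.
// Assumption: glog levels run 0..4 (larger = chattier), while the new
// util/log levels run -1..3 (larger = more important).
func glogToLog(v int) int {
	switch v {
	case 0:
		return 3
	case 1:
		return 2 // inferred by symmetry; no glog.V(1) call sites appear in these hunks
	case 2:
		return 1
	case 3:
		return 0
	default:
		return -1 // glog.V(4) and anything chattier map to the lowest level
	}
}

func main() {
	// Print the full table, mirroring the substitutions seen in the hunks.
	for v := 0; v <= 4; v++ {
		fmt.Printf("glog.V(%d) -> log.V(%d)\n", v, glogToLog(v))
	}
}
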
store.session, err = store.cluster.CreateSession() if err != nil { - glog.V(0).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace) + log.V(3).Infof("Failed to open cassandra2 store, hosts %v, keyspace %s", hosts, keyspace) } // set directory hash @@ -72,7 +72,7 @@ func (store *Cassandra2Store) initialize(keyspace string, hosts []string, userna dirHash := util.Md5String([]byte(dir))[:4] store.superLargeDirectoryHash[dir] = dirHash if existingDir, found := existingHash[dirHash]; found { - glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + log.Fatalf("directory %s has the same hash as %s", dir, existingDir) } existingHash[dirHash] = dir } @@ -202,7 +202,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { @@ -210,7 +210,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath } } if err = iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) + log.V(3).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go index db4af1559..b8e6c1007 100644 --- a/weed/filer/configuration.go +++ b/weed/filer/configuration.go @@ -1,7 +1,7 @@ package filer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "os" "reflect" @@ -22,10 +22,10 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { if config.GetBool(store.GetName() + ".enabled") { store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) if err := store.Initialize(config, store.GetName()+"."); err != nil { - glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + log.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) } isFresh = f.SetStore(store) - glog.V(0).Infof("configured filer store to %s", store.GetName()) + log.V(3).Infof("configured filer store to %s", store.GetName()) hasDefaultStoreConfigured = true break } @@ -70,16 +70,16 @@ func (f *Filer) LoadConfiguration(config *util.ViperProxy) (isFresh bool) { store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) if err := store.Initialize(config, key+"."); err != nil { - glog.Fatalf("Failed to initialize store for %s: %+v", key, err) + log.Fatalf("Failed to initialize store for %s: %+v", key, err) } location := config.GetString(key + ".location") if location == "" { - glog.Errorf("path-specific filer store needs %s", key+".location") + log.Errorf("path-specific filer store needs %s", key+".location") os.Exit(-1) } f.Store.AddPathSpecificStore(location, storeId, store) - glog.V(0).Infof("configure filer %s for %s", store.GetName(), location) + log.V(3).Infof("configure filer %s for %s", store.GetName(), location) } return @@ -92,7 +92,7 @@ func validateOneEnabledStore(config *util.ViperProxy) { if enabledStore == "" { enabledStore = store.GetName() } else { - glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) + log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) } } } diff --git a/weed/filer/elastic/v7/elastic_store.go 
b/weed/filer/elastic/v7/elastic_store.go index bf9d3394e..f1d153739 100644 --- a/weed/filer/elastic/v7/elastic_store.go +++ b/weed/filer/elastic/v7/elastic_store.go @@ -12,7 +12,7 @@ import ( jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -70,7 +70,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre if store.maxPageSize <= 0 { store.maxPageSize = 10000 } - glog.Infof("filer store elastic endpoints: %v.", servers) + log.Infof("filer store elastic endpoints: %v.", servers) return store.initialize(options) } @@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) } value, err := jsoniter.Marshal(esEntry) if err != nil { - glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) return fmt.Errorf("insert entry marshal %v", err) } _, err = store.client.Index(). @@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) BodyJson(string(value)). Do(ctx) if err != nil { - glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) return fmt.Errorf("insert entry %v", err) } return nil @@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful err := jsoniter.Unmarshal(searchResult.Source, esEntry) return esEntry.Entry, err } - glog.Errorf("find entry(%s),%v.", string(fullpath), err) + log.Errorf("find entry(%s),%v.", string(fullpath), err) return nil, filer_pb.ErrNotFound } @@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) { return nil } - glog.Errorf("delete index(%s) %v.", index, err) + log.Errorf("delete index(%s) %v.", index, err) return err } @@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e return nil } } - glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) + log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) return fmt.Errorf("delete entry %v", err) } func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool { if err := store.DeleteEntry(ctx, entry.FullPath); err != nil { - glog.Errorf("elastic delete %s: %v.", entry.FullPath, err) + log.Errorf("elastic delete %s: %v.", entry.FullPath, err) return false } return true @@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries( result := &elastic.SearchResult{} if (startFileName == "" && first) || inclusive { if result, err = store.search(ctx, index, parentId); err != nil { - glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } else { @@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries( } after := weed_util.Md5String([]byte(fullPath)) if result, err = store.searchAfter(ctx, index, parentId, after); err != nil { - glog.Errorf("searchAfter 
(%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) return } } diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go index 86262bc0f..00f807394 100644 --- a/weed/filer/elastic/v7/elastic_store_kv.go +++ b/weed/filer/elastic/v7/elastic_store_kv.go @@ -11,7 +11,7 @@ import ( jsoniter "github.com/json-iterator/go" elastic "github.com/olivere/elastic/v7" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) { @@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) return nil } } - glog.Errorf("delete key(id:%s) %v.", string(key), err) + log.Errorf("delete key(id:%s) %v.", string(key), err) return fmt.Errorf("delete key %v", err) } @@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, return esEntry.Value, nil } } - glog.Errorf("find key(%s),%v.", string(key), err) + log.Errorf("find key(%s),%v.", string(key), err) return value, filer.ErrKvNotFound } @@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) esEntry := &ESKVEntry{value} val, err := jsoniter.Marshal(esEntry) if err != nil { - glog.Errorf("insert key(%s) %v.", string(key), err) + log.Errorf("insert key(%s) %v.", string(key), err) return fmt.Errorf("insert key %v", err) } _, err = store.client.Index(). diff --git a/weed/filer/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go index fa2a72ca5..878451cca 100644 --- a/weed/filer/etcd/etcd_store.go +++ b/weed/filer/etcd/etcd_store.go @@ -11,7 +11,7 @@ import ( "go.etcd.io/etcd/client/v3" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -73,7 +73,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix } func (store *EtcdStore) initialize(servers, username, password string, timeout time.Duration, tlsConfig *tls.Config) error { - glog.Infof("filer store etcd: %s", servers) + log.Infof("filer store etcd: %s", servers) client, err := clientv3.New(clientv3.Config{ Endpoints: strings.Split(servers, ","), @@ -95,7 +95,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t return fmt.Errorf("error checking etcd connection: %s", err) } - glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version) + log.V(3).Infof("сonnection to etcd has been successfully verified. 
etcd version: %s", resp.Version) store.client = client return nil @@ -208,7 +208,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 36096d2c1..52c80cd57 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -12,7 +12,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" @@ -105,7 +105,7 @@ func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, c func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error { urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return err } err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0) @@ -118,7 +118,7 @@ func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFi func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) { urlStrings, err := lookupFileIdFn(fileId) if err != nil { - glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) return 0, err } return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset) @@ -158,7 +158,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri break } if err != nil { - glog.V(0).Infof("read %s failed, err: %v", urlString, err) + log.V(3).Infof("read %s failed, err: %v", urlString, err) } else { break } @@ -168,7 +168,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri break } if err != nil && shouldRetry { - glog.V(0).Infof("retry reading in %v", waitTime) + log.V(3).Infof("retry reading in %v", waitTime) time.Sleep(waitTime) } else { break diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go index dfa971f86..d723c6b1b 100644 --- a/weed/filer/filechunks2_test.go +++ b/weed/filer/filechunks2_test.go @@ -6,7 +6,7 @@ import ( "slices" "testing" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -80,6 +80,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) { return int(a.Offset - b.Offset) }) for _, chunk := range chunks { - glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + log.V(3).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) } } diff --git a/weed/filer/filer.go b/weed/filer/filer.go index acde49d54..204bf2b13 100644 --- a/weed/filer/filer.go +++ b/weed/filer/filer.go @@ -18,7 +18,7 @@ 
import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -92,7 +92,7 @@ func (f *Filer) MaybeBootstrapFromOnePeer(self pb.ServerAddress, existingNodes [ return } - glog.V(0).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId) + log.V(3).Infof("bootstrap from %v clientId:%d", earliestNode.Address, f.UniqueFilerId) return pb.WithFilerClient(false, f.UniqueFilerId, pb.ServerAddress(earliestNode.Address), f.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { return filer_pb.StreamBfs(client, "/", snapshotTime.UnixNano(), func(parentPath util.FullPath, entry *filer_pb.Entry) error { @@ -110,7 +110,7 @@ func (f *Filer) AggregateFromPeers(self pb.ServerAddress, existingNodes []*maste snapshot = append(snapshot, address) } f.Dlm.LockRing.SetSnapshot(snapshot) - glog.V(0).Infof("%s aggregate from peers %+v", self, snapshot) + log.V(3).Infof("%s aggregate from peers %+v", self, snapshot) f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption) f.MasterClient.SetOnPeerUpdateFn(func(update *master_pb.ClusterNodeUpdate, startFrom time.Time) { @@ -150,15 +150,15 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) (isFresh bool) { storeIdBytes = make([]byte, 4) util.Uint32toBytes(storeIdBytes, uint32(f.Signature)) if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil { - glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) + log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) } - glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature) + log.V(3).Infof("create %s to %d", FilerStoreId, f.Signature) return true } else if err == nil && len(storeIdBytes) == 4 { f.Signature = int32(util.BytesToUint32(storeIdBytes)) - glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature) + log.V(3).Infof("existing %s = %d", FilerStoreId, f.Signature) } else { - glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) + log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) } return false } @@ -201,7 +201,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr /* if !hasWritePermission(lastDirectoryEntry, entry) { - glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d", + log.V(3).Infof("directory %s: %v, entry: uid=%d gid=%d", lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath) } @@ -216,19 +216,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr } } - glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) + log.V(-1).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) if err := f.Store.InsertEntry(ctx, entry); err != nil { - glog.Errorf("insert entry %s: %v", entry.FullPath, err) + log.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { if o_excl { - glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + log.V(0).Infof("EEXIST: entry %s already exists", entry.FullPath) return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) } - glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) + log.V(-1).Infof("UpdateEntry %s: 
old entry: %v", entry.FullPath, oldEntry.Name()) if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { - glog.Errorf("update entry %s: %v", entry.FullPath, err) + log.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } @@ -237,7 +237,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr f.deleteChunksIfNotNew(oldEntry, entry) - glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + log.V(-1).Infof("CreateEntry %s: created", entry.FullPath) return nil } @@ -252,7 +252,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di // fmt.Printf("%d dirPath: %+v\n", level, dirPath) // check the store directly - glog.V(4).Infof("find uncached directory: %s", dirPath) + log.V(-1).Infof("find uncached directory: %s", dirPath) dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath)) // no such existing directory @@ -287,11 +287,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di }, } - glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) + log.V(1).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.Store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil { - glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) + log.V(0).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -301,7 +301,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di } } else if !dirEntry.IsDirectory() { - glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) + log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -312,11 +312,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er if oldEntry != nil { entry.Attr.Crtime = oldEntry.Attr.Crtime if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.Errorf("existing %s is a directory", oldEntry.FullPath) + log.Errorf("existing %s is a directory", oldEntry.FullPath) return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.Errorf("existing %s is a file", oldEntry.FullPath) + log.Errorf("existing %s is a file", oldEntry.FullPath) return fmt.Errorf("existing %s is a file", oldEntry.FullPath) } } diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go index e93279fba..be0a2a37f 100644 --- a/weed/filer/filer_conf.go +++ b/weed/filer/filer_conf.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/wdclient" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/viant/ptrie" @@ -68,7 +68,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) { if err == filer_pb.ErrNotFound { return nil } - glog.Errorf("read filer conf entry %s: %v", filerConfPath, err) + log.Errorf("read filer conf entry %s: %v", filerConfPath, err) return } @@ -83,7 +83,7 @@ func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*file if len(content) == 0 { content, err = filer.readEntry(chunks, size) if err != nil { - glog.Errorf("read filer conf content: %v", err) + log.Errorf("read filer conf content: %v", err) 
return } } @@ -119,7 +119,7 @@ func (fc *FilerConf) GetLocationConf(locationPrefix string) (locConf *filer_pb.F func (fc *FilerConf) SetLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) { err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) if err != nil { - glog.Errorf("put location prefix: %v", err) + log.Errorf("put location prefix: %v", err) } return } @@ -132,7 +132,7 @@ func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err } err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf) if err != nil { - glog.Errorf("put location prefix: %v", err) + log.Errorf("put location prefix: %v", err) } return } diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go index 0ae421981..96840281f 100644 --- a/weed/filer/filer_delete_entry.go +++ b/weed/filer/filer_delete_entry.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR return nil }) if err != nil { - glog.V(2).Infof("delete directory %s: %v", p, err) + log.V(1).Infof("delete directory %s: %v", p, err) return fmt.Errorf("delete directory %s: %v", p, err) } } @@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry for { entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "") if err != nil { - glog.Errorf("list folder %s: %v", entry.FullPath, err) + log.Errorf("list folder %s: %v", entry.FullPath, err) return fmt.Errorf("list folder %s: %v", entry.FullPath, err) } if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for first iteration in the loop - glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) + log.V(1).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name()) return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath) } @@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry } } - glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) + log.V(0).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { return fmt.Errorf("filer store delete: %v", storeDeletionErr) @@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) { - glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + log.V(0).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil { return fmt.Errorf("filer store delete: %v", storeDeletionErr) @@ -143,7 +143,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) { Name: collectionName, }) if err != nil { - glog.Infof("delete collection %s: %v", collectionName, err) + log.Infof("delete collection %s: %v", collectionName, err) } return err }) @@ -153,7 
+153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) { func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) { for _, hardLinkId := range hardLinkIds { if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil { - glog.Errorf("delete hard link id %d : %v", hardLinkId, err) + log.Errorf("delete hard link id %d : %v", hardLinkId, err) } } } diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go index 362c7c51b..1ad60c2dd 100644 --- a/weed/filer/filer_deletion.go +++ b/weed/filer/filer_deletion.go @@ -7,7 +7,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/wdclient" @@ -58,10 +58,10 @@ func (f *Filer) loopProcessingDeletion() { _, err := operation.DeleteFileIdsWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) if err != nil { if !strings.Contains(err.Error(), storage.ErrorDeleted.Error()) { - glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + log.V(3).Infof("deleting fileIds len=%d error: %v", deletionCount, err) } } else { - glog.V(2).Infof("deleting fileIds %+v", toDeleteFileIds) + log.V(1).Infof("deleting fileIds %+v", toDeleteFileIds) } } }) @@ -92,7 +92,7 @@ func (f *Filer) doDeleteChunks(chunks []*filer_pb.FileChunk) { } dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk) if manifestResolveErr != nil { - glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) + log.V(3).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr) } for _, dChunk := range dataChunks { f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString()) @@ -118,7 +118,7 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { toDelete, err := MinusChunks(f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks) if err != nil { - glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks) + log.Errorf("Failed to resolve old entry chunks when deleting old entry chunks. 
new: %s, old: %s", newChunks, oldChunks) return } f.DeleteChunksNotRecursive(toDelete) diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go index 4c99da72a..165f7d52a 100644 --- a/weed/filer/filer_notify.go +++ b/weed/filer/filer_notify.go @@ -11,7 +11,7 @@ import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -56,10 +56,10 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry } if notification.Queue != nil { - glog.V(3).Infof("notifying entry update %v", fullpath) + log.V(0).Infof("notifying entry update %v", fullpath) if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil { // throw message - glog.Error(err) + log.Error(err) } } @@ -78,7 +78,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica } data, err := proto.Marshal(event) if err != nil { - glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) return } @@ -101,7 +101,7 @@ func (f *Filer) logFlushFunc(logBuffer *log_buffer.LogBuffer, startTime, stopTim for { if err := f.appendToFile(targetFile, buf); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) + log.V(3).Infof("metadata log write failed %s: %v", targetFile, err) time.Sleep(737 * time.Millisecond) } else { break diff --git a/weed/filer/filer_notify_read.go b/weed/filer/filer_notify_read.go index ac2c763e6..e47108348 100644 --- a/weed/filer/filer_notify_read.go +++ b/weed/filer/filer_notify_read.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/wdclient" "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -217,7 +217,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) { // println(" enqueue", tsMinute) t, parseErr := time.Parse("2006-01-02-15-04", tsMinute) if parseErr != nil { - glog.Errorf("failed to parse %s: %v", tsMinute, parseErr) + log.Errorf("failed to parse %s: %v", tsMinute, parseErr) continue } filerId := getFilerId(hourMinuteEntry.Name()) @@ -237,7 +237,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) { for filerId, entryName := range freshFilerIds { iter, found := v.perFilerIteratorMap[filerId] if !found { - glog.Errorf("Unexpected! failed to find iterator for filer %s", filerId) + log.Errorf("Unexpected! 
failed to find iterator for filer %s", filerId) continue } next, nextErr := iter.getNext(v) diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go index 6cec80148..680eb8c86 100644 --- a/weed/filer/filer_on_meta_event.go +++ b/weed/filer/filer_on_meta_event.go @@ -2,7 +2,7 @@ package filer import ( "bytes" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -43,7 +43,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR return } - glog.V(0).Infof("procesing %v", event) + log.V(3).Infof("processing %v", event) if entry.Name == FilerConfName { f.reloadFilerConfiguration(entry) } @@ -62,7 +62,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) { fc := NewFilerConf() err := fc.loadFromChunks(f, entry.Content, entry.GetChunks(), FileSize(entry)) if err != nil { - glog.Errorf("read filer conf chunks: %v", err) + log.Errorf("read filer conf chunks: %v", err) return } f.FilerConf = fc @@ -74,7 +74,7 @@ func (f *Filer) LoadFilerConf() { return fc.loadFromFiler(f) }) if err != nil { - glog.Errorf("read filer conf: %v", err) + log.Errorf("read filer conf: %v", err) return } f.FilerConf = fc @@ -85,7 +85,7 @@ // ////////////////////////////////// func (f *Filer) LoadRemoteStorageConfAndMapping() { if err := f.RemoteStorage.LoadRemoteStorageConfigurationsAndMapping(f); err != nil { - glog.Errorf("read remote conf and mapping: %v", err) + log.Errorf("read remote conf and mapping: %v", err) return } } diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go index 12402e82f..b469731be 100644 --- a/weed/filer/filerstore_hardlink.go +++ b/weed/filer/filerstore_hardlink.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -22,7 +22,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry } // check what is existing entry - // glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) + // log.V(-1).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) actualStore := fsw.getActualStore(entry.FullPath) existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath) if err != nil && err != filer_pb.ErrNotFound { @@ -31,7 +31,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry // remove old hard link if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 { - glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) + log.V(-1).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } @@ -50,7 +50,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err return encodeErr } - glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + log.V(-1).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return fsw.KvPut(ctx, key, newBlob) } @@ -63,16 +63,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr value, err := fsw.KvGet(ctx, key) if err != nil { - glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + 
log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } if err = entry.DecodeAttributesAndChunks(value); err != nil { - glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) return err } - glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) + log.V(-1).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter) return nil } @@ -94,7 +94,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har entry.HardLinkCounter-- if entry.HardLinkCounter <= 0 { - glog.V(4).Infof("DeleteHardLink KvDelete %v", key) + log.V(-1).Infof("DeleteHardLink KvDelete %v", key) return fsw.KvDelete(ctx, key) } @@ -103,7 +103,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har return encodeErr } - glog.V(4).Infof("DeleteHardLink KvPut %v", key) + log.V(-1).Infof("DeleteHardLink KvPut %v", key) return fsw.KvPut(ctx, key, newBlob) } diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go index ebaf04065..ac22e416d 100644 --- a/weed/filer/filerstore_wrapper.go +++ b/weed/filer/filerstore_wrapper.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/viant/ptrie" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -79,7 +79,7 @@ func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, fsw.storeIdToStore[storeId] = NewFilerStorePathTranslator(path, store) err := fsw.pathToStore.Put([]byte(path), storeId) if err != nil { - glog.Fatalf("put path specific store: %v", err) + log.Fatalf("put path specific store: %v", err) } } @@ -128,7 +128,7 @@ func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) err return err } - // glog.V(4).Infof("InsertEntry %s", entry.FullPath) + // log.V(-1).Infof("InsertEntry %s", entry.FullPath) return actualStore.InsertEntry(ctx, entry) } @@ -149,7 +149,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err return err } - // glog.V(4).Infof("UpdateEntry %s", entry.FullPath) + // log.V(-1).Infof("UpdateEntry %s", entry.FullPath) return actualStore.UpdateEntry(ctx, entry) } @@ -162,7 +162,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) ( }() entry, err = actualStore.FindEntry(ctx, fp) - // glog.V(4).Infof("FindEntry %s: %v", fp, err) + // log.V(-1).Infof("FindEntry %s: %v", fp, err) if err != nil { if fsw.CanDropWholeBucket() && strings.Contains(err.Error(), "Table") && strings.Contains(err.Error(), "doesn't exist") { err = filer_pb.ErrNotFound @@ -192,14 +192,14 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) // remove hard link op := ctx.Value("OP") if op != "MV" { - glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + log.V(-1).Infof("DeleteHardLink %s", existingEntry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } } } - // glog.V(4).Infof("DeleteEntry %s", fp) + // log.V(-1).Infof("DeleteEntry %s", fp) return actualStore.DeleteEntry(ctx, fp) } @@ -215,14 +215,14 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry // remove hard link op := ctx.Value("OP") if op != "MV" { - glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + log.V(-1).Infof("DeleteHardLink %s", 
existingEntry.FullPath) if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { return err } } } - // glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath) + // log.V(-1).Infof("DeleteOneEntry %s", existingEntry.FullPath) return actualStore.DeleteEntry(ctx, existingEntry.FullPath) } @@ -234,7 +234,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util. stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) }() - // glog.V(4).Infof("DeleteFolderChildren %s", fp) + // log.V(-1).Infof("DeleteFolderChildren %s", fp) return actualStore.DeleteFolderChildren(ctx, fp) } @@ -246,7 +246,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) }() - // glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) + // log.V(-1).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) filer_pb.AfterEntryDeserialization(entry.GetChunks()) @@ -264,7 +264,7 @@ func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, if limit > math.MaxInt32-1 { limit = math.MaxInt32 - 1 } - // glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) + // log.V(-1).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) adjustedEntryFunc := func(entry *Entry) bool { fsw.maybeReadHardLink(ctx, entry) filer_pb.AfterEntryDeserialization(entry.GetChunks()) diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go index 1a0e3c893..5915580c4 100644 --- a/weed/filer/hbase/hbase_store.go +++ b/weed/filer/hbase/hbase_store.go @@ -5,7 +5,7 @@ import ( "context" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/tsuna/gohbase" @@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa } if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go index 7960bf476..5ae85c453 100644 --- a/weed/filer/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -13,7 +13,7 @@ import ( "os" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -44,7 +44,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre } func (store *LevelDBStore) initialize(dir string) (err error) { - glog.V(0).Infof("filer store dir: %s", dir) + log.V(3).Infof("filer store dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s 
Writable: %s", dir, err) @@ -61,7 +61,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) { store.db, err = leveldb.RecoverFile(dir, opts) } if err != nil { - glog.Infof("filer store open dir %s: %v", dir, err) + log.Infof("filer store open dir %s: %v", dir, err) return } } @@ -205,7 +205,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go index b465046f9..5a2902ee0 100644 --- a/weed/filer/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -15,7 +15,7 @@ import ( leveldb_util "github.com/syndtr/goleveldb/leveldb/util" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -40,7 +40,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr } func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { - glog.Infof("filer store leveldb2 dir: %s", dir) + log.Infof("filer store leveldb2 dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -61,7 +61,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { db, dbErr = leveldb.RecoverFile(dbFolder, opts) } if dbErr != nil { - glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + log.Errorf("filer store open dir %s: %v", dbFolder, dbErr) return dbErr } store.dbs = append(store.dbs, db) @@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go index 2522221da..06e3eb776 100644 --- a/weed/filer/leveldb3/leveldb3_store.go +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -17,7 +17,7 @@ import ( leveldb_util "github.com/syndtr/goleveldb/leveldb/util" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -47,7 +47,7 @@ func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, pr } func (store *LevelDB3Store) initialize(dir string) (err error) { - glog.Infof("filer store leveldb3 dir: %s", dir) + log.Infof("filer store leveldb3 dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -88,7 +88,7 @@ func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) { db, dbErr = leveldb.RecoverFile(dbFolder, opts) } if dbErr != nil { - glog.Errorf("filer store open dir 
%s: %v", dbFolder, dbErr) + log.Errorf("filer store open dir %s: %v", dbFolder, dbErr) return nil, dbErr } return db, nil @@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go index 976822ad1..9ce94ba05 100644 --- a/weed/filer/meta_aggregator.go +++ b/weed/filer/meta_aggregator.go @@ -14,7 +14,7 @@ import ( "google.golang.org/grpc" "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -73,23 +73,23 @@ func (ma *MetaAggregator) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, star func (ma *MetaAggregator) loopSubscribeToOneFiler(f *Filer, self pb.ServerAddress, peer pb.ServerAddress, startFrom time.Time, stopChan chan struct{}) { lastTsNs := startFrom.UnixNano() for { - glog.V(0).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("loopSubscribeToOneFiler read %s start from %v %d", peer, time.Unix(0, lastTsNs), lastTsNs) nextLastTsNs, err := ma.doSubscribeToOneFiler(f, self, peer, lastTsNs) // check stopChan to see if we should stop select { case <-stopChan: - glog.V(0).Infof("stop subscribing peer %s meta change", peer) + log.V(3).Infof("stop subscribing peer %s meta change", peer) return default: } if err != nil { - errLvl := glog.Level(0) + errLvl := log.Level(0) if strings.Contains(err.Error(), "duplicated local subscription detected") { - errLvl = glog.Level(4) + errLvl = log.Level(4) } - glog.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err) + log.V(errLvl).Infof("subscribing remote %s meta change: %v", peer, err) } if lastTsNs < nextLastTsNs { lastTsNs = nextLastTsNs @@ -126,35 +126,35 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, defer func(prevTsNs int64) { if lastTsNs != prevTsNs && lastTsNs != lastPersistTime.UnixNano() { if err := ma.updateOffset(f, peer, peerSignature, lastTsNs); err == nil { - glog.V(0).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) } else { - glog.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.Errorf("failed to save last sync time with %s at %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) } } }(prevTsNs) } - glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + log.V(3).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) var counter int64 var synced bool maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) { if err := Replay(f.Store, event); err != nil { - glog.Errorf("failed to reply metadata change from %v: %v", peer, err) + log.Errorf("failed to reply metadata change from %v: %v", peer, err) return } counter++ if lastPersistTime.Add(time.Minute).Before(time.Now()) { if err := 
ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil { if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() { - glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0) + log.V(3).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0) } else if !synced { synced = true - glog.V(0).Infof("synced with %s", peer) + log.V(3).Infof("synced with %s", peer) } lastPersistTime = time.Now() counter = 0 } else { - glog.V(0).Infof("failed to update offset for %v: %v", peer, err) + log.V(3).Infof("failed to update offset for %v: %v", peer, err) } } } @@ -163,7 +163,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error { data, err := proto.Marshal(event) if err != nil { - glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err) return err } dir := event.Directory @@ -175,7 +175,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, return nil } - glog.V(0).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId) + log.V(3).Infof("subscribing remote %s meta change: %v, clientId:%d", peer, time.Unix(0, lastTsNs), ma.filer.UniqueFilerId) err = pb.WithFilerClient(true, 0, peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -188,7 +188,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, ClientEpoch: atomic.LoadInt32(&ma.filer.UniqueFilerEpoch), }) if err != nil { - glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err) + log.V(3).Infof("SubscribeLocalMetadata %v: %v", peer, err) return fmt.Errorf("subscribe: %v", err) } @@ -198,12 +198,12 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress, return nil } if listenErr != nil { - glog.V(0).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr) + log.V(3).Infof("SubscribeLocalMetadata stream %v: %v", peer, listenErr) return listenErr } if err := processEventFn(resp); err != nil { - glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err) + log.V(3).Infof("SubscribeLocalMetadata process %v: %v", resp, err) return fmt.Errorf("process %v: %v", resp, err) } @@ -248,7 +248,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer pb.ServerAddress, peerSignat lastTsNs = int64(util.BytesToUint64(value)) - glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs) + log.V(3).Infof("readOffset %s : %d", peer, lastTsNs) return } @@ -266,7 +266,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer pb.ServerAddress, peerSign return fmt.Errorf("updateOffset %s : %v", peer, err) } - glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs) + log.V(-1).Infof("updateOffset %s : %d", peer, lastTsNs) return } diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go index f6b009e92..26f5d65dd 100644 --- a/weed/filer/meta_replay.go +++ b/weed/filer/meta_replay.go @@ -4,7 +4,7 @@ import ( "context" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -15,7 +15,7 @@ func Replay(filerStore FilerStore, 
resp *filer_pb.SubscribeMetadataResponse) err var newEntry *Entry if message.OldEntry != nil { oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name) - glog.V(4).Infof("deleting %v", oldPath) + log.V(-1).Infof("deleting %v", oldPath) if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil { return err } @@ -27,7 +27,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err dir = message.NewParentPath } key := util.NewFullPath(dir, message.NewEntry.Name) - glog.V(4).Infof("creating %v", key) + log.V(-1).Infof("creating %v", key) newEntry = FromPbEntry(dir, message.NewEntry) if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil { return err diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go index c05ed20f0..94dc6928e 100644 --- a/weed/filer/mongodb/mongodb_store.go +++ b/weed/filer/mongodb/mongodb_store.go @@ -10,7 +10,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "go.mongodb.org/mongo-driver/bson" @@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath var where = bson.M{"directory": dir, "name": name} err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) if err != mongo.ErrNoDocuments && err != nil { - glog.Errorf("find %s: %v", fullpath, err) + log.Errorf("find %s: %v", fullpath, err) return nil, filer_pb.ErrNotFound } @@ -264,7 +264,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir lastFileName = data.Name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } @@ -275,7 +275,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir } if err := cur.Close(ctx); err != nil { - glog.V(0).Infof("list iterator close: %v", err) + log.V(3).Infof("list iterator close: %v", err) } return lastFileName, err diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go index f52dbfc03..0df4cbdd5 100644 --- a/weed/filer/mongodb/mongodb_store_kv.go +++ b/weed/filer/mongodb/mongodb_store_kv.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -37,7 +37,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, var where = bson.M{"directory": dir, "name": name} err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) if err != mongo.ErrNoDocuments && err != nil { - glog.Errorf("kv get: %v", err) + log.Errorf("kv get: %v", err) return nil, filer.ErrKvNotFound } diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go index b87fa0411..24d1d81b3 100644 --- a/weed/filer/reader_at.go +++ b/weed/filer/reader_at.go @@ -7,7 +7,7 @@ import ( "math/rand" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" 
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/wdclient" @@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp locations = resp.LocationsMap[vid] if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", fileId) + log.V(3).Infof("failed to locate %s", fileId) return fmt.Errorf("failed to locate %s", fileId) } vicCacheLock.Lock() @@ -113,7 +113,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { c.chunkViews.Lock.RLock() defer c.chunkViews.Lock.RUnlock() - // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + // log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) n, _, err = c.doReadAt(p, offset) return } @@ -125,7 +125,7 @@ func (c *ChunkReadAt) ReadAtWithTime(p []byte, offset int64) (n int, ts int64, e c.chunkViews.Lock.RLock() defer c.chunkViews.Lock.RUnlock() - // glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + // log.V(-1).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) return c.doReadAt(p, offset) } @@ -143,7 +143,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err } if startOffset < chunk.ViewOffset { gap := chunk.ViewOffset - startOffset - glog.V(4).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) + log.V(-1).Infof("zero [%d,%d)", startOffset, chunk.ViewOffset) n += zero(p, startOffset-offset, gap) startOffset, remaining = chunk.ViewOffset, remaining-gap if remaining <= 0 { @@ -155,12 +155,12 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err if chunkStart >= chunkStop { continue } - // glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) + // log.V(-1).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.ViewOffset-chunk.Offset, chunk.ViewOffset-chunk.Offset+int64(chunk.ViewSize)) bufferOffset := chunkStart - chunk.ViewOffset + chunk.OffsetInChunk ts = chunk.ModifiedTsNs copied, err := c.readChunkSliceAt(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], chunk, nextChunks, uint64(bufferOffset)) if err != nil { - glog.Errorf("fetching chunk %+v: %v\n", chunk, err) + log.Errorf("fetching chunk %+v: %v\n", chunk, err) return copied, ts, err } @@ -168,7 +168,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err startOffset, remaining = startOffset+int64(copied), remaining-int64(copied) } - // glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) + // log.V(-1).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) // zero the remaining bytes if a gap exists at the end of the last chunk (or a fully sparse file) if err == nil && remaining > 0 { @@ -178,7 +178,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, ts int64, err err startOffset -= offset } if delta > 0 { - glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize) + 
log.V(-1).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+delta, c.fileSize) n += zero(p, startOffset, delta) } } diff --git a/weed/filer/reader_cache.go b/weed/filer/reader_cache.go index 2ef81a931..53308f466 100644 --- a/weed/filer/reader_cache.go +++ b/weed/filer/reader_cache.go @@ -6,7 +6,7 @@ import ( "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "github.com/seaweedfs/seaweedfs/weed/util/mem" @@ -63,7 +63,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { continue } if rc.chunkCache.IsInCache(chunkView.FileId, true) { - glog.V(4).Infof("%s is in cache", chunkView.FileId) + log.V(-1).Infof("%s is in cache", chunkView.FileId) continue } @@ -72,7 +72,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) { return } - // glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) + // log.V(-1).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset) // cache this chunk if not yet shouldCache := (uint64(chunkView.ViewOffset) + chunkView.ChunkSize) <= rc.chunkCache.GetMaxFilePartSizeInCache() cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), shouldCache) @@ -118,7 +118,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt } } - // glog.V(4).Infof("cache1 %s", fileId) + // log.V(-1).Infof("cache1 %s", fileId) cacher := newSingleChunkCacher(rc, fileId, cipherKey, isGzipped, chunkSize, shouldCache) go cacher.startCaching() @@ -132,7 +132,7 @@ func (rc *ReaderCache) ReadChunkAt(buffer []byte, fileId string, cipherKey []byt func (rc *ReaderCache) UnCache(fileId string) { rc.Lock() defer rc.Unlock() - // glog.V(4).Infof("uncache %s", fileId) + // log.V(-1).Infof("uncache %s", fileId) if downloader, found := rc.downloaders[fileId]; found { downloader.destroy() delete(rc.downloaders, fileId) diff --git a/weed/filer/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go index 5e7c850d0..2fa377512 100644 --- a/weed/filer/redis/universal_redis_store.go +++ b/weed/filer/redis/universal_redis_store.go @@ -10,7 +10,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).Infof("list %s : %v", path, err) + log.V(3).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go index 5e7bc019e..8f6fbeadf 100644 --- a/weed/filer/redis2/redis_store.go +++ b/weed/filer/redis2/redis_store.go @@ -8,7 +8,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -41,22 +41,22 @@ func (store *Redis2Store) initialize(hostPort string, password string, database if enableMtls { clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if 
err != nil { - glog.Fatalf("Error loading client certificate and key pair: %v", err) + log.Fatalf("Error loading client certificate and key pair: %v", err) } caCertBytes, err := os.ReadFile(caCertPath) if err != nil { - glog.Fatalf("Error reading CA certificate file: %v", err) + log.Fatalf("Error reading CA certificate file: %v", err) } caCertPool := x509.NewCertPool() if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok { - glog.Fatalf("Error appending CA certificate to pool") + log.Fatalf("Error appending CA certificate to pool") } redisHost, _, err := net.SplitHostPort(hostPort) if err != nil { - glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) + log.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) } tlsConfig := &tls.Config{ diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go index d3f01f88a..5a956e9f5 100644 --- a/weed/filer/redis2/universal_redis_store.go +++ b/weed/filer/redis2/universal_redis_store.go @@ -8,7 +8,7 @@ import ( "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).Infof("list %s : %v", path, err) + log.V(3).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/redis3/item_list_serde.go b/weed/filer/redis3/item_list_serde.go index f4410b61b..f394c7004 100644 --- a/weed/filer/redis3/item_list_serde.go +++ b/weed/filer/redis3/item_list_serde.go @@ -2,7 +2,7 @@ package redis3 import ( "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util/skiplist" "google.golang.org/protobuf/proto" ) @@ -22,7 +22,7 @@ func LoadItemList(data []byte, prefix string, client redis.UniversalClient, stor message := &skiplist.SkipListProto{} if err := proto.Unmarshal(data, message); err != nil { - glog.Errorf("loading skiplist: %v", err) + log.Errorf("loading skiplist: %v", err) } nl.skipList.MaxNewLevel = int(message.MaxNewLevel) nl.skipList.MaxLevel = int(message.MaxLevel) @@ -69,7 +69,7 @@ func (nl *ItemList) ToBytes() []byte { } data, err := proto.Marshal(message) if err != nil { - glog.Errorf("marshal skiplist: %v", err) + log.Errorf("marshal skiplist: %v", err) } return data } diff --git a/weed/filer/redis3/kv_directory_children.go b/weed/filer/redis3/kv_directory_children.go index 5a2d76141..9738b80c4 100644 --- a/weed/filer/redis3/kv_directory_children.go +++ b/weed/filer/redis3/kv_directory_children.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) const maxNameBatchSizeLimit = 1000000 @@ -31,7 +31,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit) if err := nameList.WriteName(name); err != nil { - glog.Errorf("add %s %s: %v", key, name, err) + log.Errorf("add %s %s: %v", key, name, err) return err } @@ -100,7 +100,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s 
if err = nameList.ListNames("", func(name string) bool { if err := onDeleteFn(name); err != nil { - glog.Errorf("delete %s child %s: %v", key, name, err) + log.Errorf("delete %s child %s: %v", key, name, err) return false } return true diff --git a/weed/filer/redis3/redis_store.go b/weed/filer/redis3/redis_store.go index 3bb0ce46f..d5c4f9b87 100644 --- a/weed/filer/redis3/redis_store.go +++ b/weed/filer/redis3/redis_store.go @@ -10,7 +10,7 @@ import ( "github.com/go-redsync/redsync/v4/redis/goredis/v9" "github.com/redis/go-redis/v9" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -42,22 +42,22 @@ func (store *Redis3Store) initialize(hostPort string, password string, database if enableMtls { clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if err != nil { - glog.Fatalf("Error loading client certificate and key pair: %v", err) + log.Fatalf("Error loading client certificate and key pair: %v", err) } caCertBytes, err := os.ReadFile(caCertPath) if err != nil { - glog.Fatalf("Error reading CA certificate file: %v", err) + log.Fatalf("Error reading CA certificate file: %v", err) } caCertPool := x509.NewCertPool() if ok := caCertPool.AppendCertsFromPEM(caCertBytes); !ok { - glog.Fatalf("Error appending CA certificate to pool") + log.Fatalf("Error appending CA certificate to pool") } redisHost, _, err := net.SplitHostPort(hostPort) if err != nil { - glog.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) + log.Fatalf("Error parsing redis host and port from %s: %v", hostPort, err) } tlsConfig := &tls.Config{ diff --git a/weed/filer/redis3/skiplist_element_store.go b/weed/filer/redis3/skiplist_element_store.go index 46506187e..551aa463d 100644 --- a/weed/filer/redis3/skiplist_element_store.go +++ b/weed/filer/redis3/skiplist_element_store.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/redis/go-redis/v9" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util/skiplist" "google.golang.org/protobuf/proto" ) @@ -27,7 +27,7 @@ func (m *SkipListElementStore) SaveElement(id int64, element *skiplist.SkipListE key := fmt.Sprintf("%s%d", m.Prefix, id) data, err := proto.Marshal(element) if err != nil { - glog.Errorf("marshal %s: %v", key, err) + log.Errorf("marshal %s: %v", key, err) } return m.client.Set(context.Background(), key, data, 0).Err() } diff --git a/weed/filer/redis3/universal_redis_store.go b/weed/filer/redis3/universal_redis_store.go index 51675d971..c0d0ef7af 100644 --- a/weed/filer/redis3/universal_redis_store.go +++ b/weed/filer/redis3/universal_redis_store.go @@ -9,7 +9,7 @@ import ( redsync "github.com/go-redsync/redsync/v4" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).Infof("list %s : %v", path, err) + log.V(3).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { return true } diff --git a/weed/filer/redis_lua/universal_redis_store.go b/weed/filer/redis_lua/universal_redis_store.go index 9e8dbcda7..c45200b4e 100644 --- 
a/weed/filer/redis_lua/universal_redis_store.go +++ b/weed/filer/redis_lua/universal_redis_store.go @@ -9,7 +9,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer/redis_lua/stored_procedure" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d entry, err := store.FindEntry(ctx, path) lastFileName = fileName if err != nil { - glog.V(0).Infof("list %s : %v", path, err) + log.V(3).Infof("list %s : %v", path, err) if err == filer_pb.ErrNotFound { continue } diff --git a/weed/filer/remote_storage.go b/weed/filer/remote_storage.go index 3764fbac6..002840d52 100644 --- a/weed/filer/remote_storage.go +++ b/weed/filer/remote_storage.go @@ -12,7 +12,7 @@ import ( "math" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/viant/ptrie" ) @@ -43,7 +43,7 @@ func (rs *FilerRemoteStorage) LoadRemoteStorageConfigurationsAndMapping(filer *F if err == filer_pb.ErrNotFound { return nil } - glog.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err) + log.Errorf("read remote storage %s: %v", DirectoryEtcRemote, err) return } @@ -125,7 +125,7 @@ func UnmarshalRemoteStorageMappings(oldContent []byte) (mappings *remote_pb.Remo } if len(oldContent) > 0 { if err = proto.Unmarshal(oldContent, mappings); err != nil { - glog.Warningf("unmarshal existing mappings: %v", err) + log.Warningf("unmarshal existing mappings: %v", err) } } return diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go index f860f528a..489144f48 100644 --- a/weed/filer/rocksdb/rocksdb_store.go +++ b/weed/filer/rocksdb/rocksdb_store.go @@ -14,7 +14,7 @@ import ( gorocksdb "github.com/linxGnu/grocksdb" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_util "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -60,7 +60,7 @@ func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, pre } func (store *RocksDBStore) initialize(dir string) (err error) { - glog.Infof("filer store rocksdb dir: %s", dir) + log.Infof("filer store rocksdb dir: %s", dir) os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) @@ -262,7 +262,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) return false } if !eachEntryFunc(entry) { diff --git a/weed/filer/stream.go b/weed/filer/stream.go index 2f55e3e44..f6a079a23 100644 --- a/weed/filer/stream.go +++ b/weed/filer/stream.go @@ -11,7 +11,7 @@ import ( "slices" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/util" @@ -81,7 +81,7 @@ func noJwtFunc(string) string { } func 
PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) { - glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks)) + log.V(-1).Infof("prepare to stream content for chunks: %d", len(chunks)) chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) fileId2Url := make(map[string][]string) @@ -95,15 +95,15 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc if err == nil && len(urlStrings) > 0 { break } - glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId) + log.V(-1).Infof("waiting for chunk: %s", chunkView.FileId) time.Sleep(backoff) } if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return nil, err } else if len(urlStrings) == 0 { errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) - glog.Error(errUrlNotFound) + log.Error(errUrlNotFound) return nil, errUrlNotFound } fileId2Url[chunkView.FileId] = urlStrings @@ -117,7 +117,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc if offset < chunkView.ViewOffset { gap := chunkView.ViewOffset - offset remaining -= gap - glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset) + log.V(-1).Infof("zero [%d,%d)", offset, chunkView.ViewOffset) err := writeZero(writer, gap) if err != nil { return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset) @@ -139,7 +139,7 @@ func PrepareStreamContentWithThrottler(masterClient wdclient.HasLookupFileIdFunc downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize)) } if remaining > 0 { - glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining) + log.V(-1).Infof("zero [%d,%d)", offset, offset+remaining) err := writeZero(writer, remaining) if err != nil { return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining) @@ -191,7 +191,7 @@ func ReadAll(buffer []byte, masterClient *wdclient.MasterClient, chunks []*filer chunkView := x.Value urlStrings, err := lookupFileIdFn(chunkView.FileId) if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } @@ -319,13 +319,13 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { if c.bufferOffset <= offset && offset < c.bufferOffset+int64(len(c.buffer)) { return nil } - // glog.V(2).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize) + // log.V(1).Infof("c.chunkView: %v buffer:[%d,%d) offset:%d totalSize:%d", c.chunkView, c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), offset, c.totalSize) // find a possible chunk view p := c.chunkView for p != nil { chunk := p.Value - // glog.V(2).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) + // log.V(1).Infof("prepareBufferFor check chunk:[%d,%d)", chunk.ViewOffset, chunk.ViewOffset+int64(chunk.ViewSize)) if insideChunk(offset, chunk) { if c.isBufferEmpty() || c.bufferOffset != chunk.ViewOffset { c.chunkView = p @@ -345,7 +345,7 @@ func (c *ChunkStreamReader) prepareBufferFor(offset int64) (err error) { func (c *ChunkStreamReader) 
fetchChunkToBuffer(chunkView *ChunkView) error { urlStrings, err := c.lookupFileId(chunkView.FileId) if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + log.V(2).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } var buffer bytes.Buffer @@ -358,7 +358,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { break } if err != nil { - glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + log.V(2).Infof("read %s failed, err: %v", chunkView.FileId, err) buffer.Reset() } else { break @@ -371,7 +371,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { c.bufferOffset = chunkView.ViewOffset c.chunk = chunkView.FileId - // glog.V(0).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize)) + // log.V(3).Infof("fetched %s [%d,%d)", chunkView.FileId, chunkView.ViewOffset, chunkView.ViewOffset+int64(chunkView.ViewSize)) return nil } diff --git a/weed/filer/tarantool/tarantool_store.go b/weed/filer/tarantool/tarantool_store.go index 8d19db60d..951bfd3e2 100644 --- a/weed/filer/tarantool/tarantool_store.go +++ b/weed/filer/tarantool/tarantool_store.go @@ -11,7 +11,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" weed_util "github.com/seaweedfs/seaweedfs/weed/util" @@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w } if len(results) < 1 { - glog.Errorf("Can't find results, data is empty") + log.Errorf("Can't find results, data is empty") return } rows, ok := results[0].([]interface{}) if !ok { - glog.Errorf("Can't convert results[0] to list") + log.Errorf("Can't convert results[0] to list") return } for _, result := range rows { row, ok := result.([]interface{}) if !ok { - glog.Errorf("Can't convert result to list") + log.Errorf("Can't convert result to list") return } if len(row) < 5 { - glog.Errorf("Length of result is less than needed: %v", len(row)) + log.Errorf("Length of result is less than needed: %v", len(row)) return } nameRaw := row[2] name, ok := nameRaw.(string) if !ok { - glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw) + log.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw) return } dataRaw := row[4] data, ok := dataRaw.(string) if !ok { - glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw) + log.Errorf("Can't convert data field to string. 
Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw) return } @@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w lastFileName = name if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if !eachEntryFunc(entry) { diff --git a/weed/filer/tikv/tikv_store.go b/weed/filer/tikv/tikv_store.go index 8187375ca..17b8166ef 100644 --- a/weed/filer/tikv/tikv_store.go +++ b/weed/filer/tikv/tikv_store.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/tikv/client-go/v2/config" @@ -66,7 +66,7 @@ func (store *TikvStore) initialize(ca, cert, key string, verify_cn, pdAddrs []st func (store *TikvStore) Shutdown() { err := store.client.Close() if err != nil { - glog.V(0).Infof("Shutdown TiKV client got error: %v", err) + log.V(3).Infof("Shutdown TiKV client got error: %v", err) } } @@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat // println("list", entry.FullPath, "chunks", len(entry.GetChunks())) if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) + log.V(3).Infof("list %s : %v", entry.FullPath, err) break } if err := iter.Next(); !eachEntryFunc(entry) || err != nil { diff --git a/weed/filer/ydb/ydb_store.go b/weed/filer/ydb/ydb_store.go index a9ad6666e..cd93b512f 100644 --- a/weed/filer/ydb/ydb_store.go +++ b/weed/filer/ydb/ydb_store.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" environ "github.com/ydb-platform/ydb-go-sdk-auth-environ" @@ -69,7 +69,7 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix store.dirBuckets = dirBuckets store.SupportBucketTable = useBucketPrefix if store.SupportBucketTable { - glog.V(0).Infof("enabled BucketPrefix") + log.V(3).Infof("enabled BucketPrefix") } store.dbs = make(map[string]bool) ctx, cancel := context.WithCancel(context.Background()) @@ -203,7 +203,7 @@ func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) dir, name := fullpath.DirAndName() tablePathPrefix, shortDir := store.getPrefix(ctx, &dir) query := withPragma(tablePathPrefix, deleteQuery) - glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir) + log.V(-1).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), table.ValueParam("$name", types.UTF8Value(name))) @@ -251,7 +251,7 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath if chunkLimit > maxChunk { chunkLimit = maxChunk } - glog.V(4).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit) + 
log.V(-1).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit) queryParams := table.NewQueryParameters( table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))), @@ -268,14 +268,14 @@ func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath return nil } truncated = res.CurrentResultSet().Truncated() - glog.V(4).Infof("truncated %v, entryCount %d", truncated, entryCount) + log.V(-1).Infof("truncated %v, entryCount %d", truncated, entryCount) for res.NextRow() { if err := res.ScanNamed( named.OptionalWithDefault("name", &name), named.OptionalWithDefault("meta", &data)); err != nil { return fmt.Errorf("list scanNamed %s : %v", dir, err) } - glog.V(8).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name)) + log.V(-1).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name)) lastFileName = name entry := &filer.Entry{ FullPath: util.NewFullPath(dir, name), @@ -345,7 +345,7 @@ func (store *YdbStore) OnBucketCreation(bucket string) { defer store.dbsLock.Unlock() if err := store.createTable(context.Background(), prefix); err != nil { - glog.Errorf("createTable %s: %v", prefix, err) + log.Errorf("createTable %s: %v", prefix, err) } if store.dbs == nil { @@ -362,14 +362,14 @@ func (store *YdbStore) OnBucketDeletion(bucket string) { defer store.dbsLock.Unlock() prefix := path.Join(store.tablePathPrefix, bucket) - glog.V(4).Infof("deleting table %s", prefix) + log.V(-1).Infof("deleting table %s", prefix) if err := store.deleteTable(context.Background(), prefix); err != nil { - glog.Errorf("deleteTable %s: %v", prefix, err) + log.Errorf("deleteTable %s: %v", prefix, err) } if err := store.DB.Scheme().RemoveDirectory(context.Background(), prefix); err != nil { - glog.Errorf("remove directory %s: %v", prefix, err) + log.Errorf("remove directory %s: %v", prefix, err) } if store.dbs == nil { @@ -393,7 +393,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error { }); err != nil { return err } - glog.V(4).Infof("deleted table %s", prefix) + log.V(-1).Infof("deleted table %s", prefix) return nil } @@ -406,11 +406,11 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre } prefixBuckets := store.dirBuckets + "/" - glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets) + log.V(-1).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets) if strings.HasPrefix(*dir, prefixBuckets) { // detect bucket bucketAndDir := (*dir)[len(prefixBuckets):] - glog.V(4).Infof("bucketAndDir: %s", bucketAndDir) + log.V(-1).Infof("bucketAndDir: %s", bucketAndDir) var bucket string if t := strings.Index(bucketAndDir, "/"); t > 0 { bucket = bucketAndDir[:t] @@ -428,9 +428,9 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre if _, found := store.dbs[bucket]; !found { if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil { store.dbs[bucket] = true - glog.V(4).Infof("created table %s", tablePathPrefixWithBucket) + log.V(-1).Infof("created table %s", tablePathPrefixWithBucket) } else { - glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err) + log.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err) } } tablePathPrefix = &tablePathPrefixWithBucket @@ -441,7 +441,7 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre func (store *YdbStore) ensureTables(ctx context.Context) error { prefixFull := store.tablePathPrefix - glog.V(4).Infof("creating base table %s", prefixFull) 
+ log.V(-1).Infof("creating base table %s", prefixFull) baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE) if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { return s.CreateTable(ctx, baseTable, createTableOptions()...) @@ -449,17 +449,17 @@ func (store *YdbStore) ensureTables(ctx context.Context) error { return fmt.Errorf("failed to create base table %s: %v", baseTable, err) } - glog.V(4).Infof("creating bucket tables") + log.V(-1).Infof("creating bucket tables") if store.SupportBucketTable { store.dbsLock.Lock() defer store.dbsLock.Unlock() for bucket := range store.dbs { - glog.V(4).Infof("creating bucket table %s", bucket) + log.V(-1).Infof("creating bucket table %s", bucket) bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE) if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error { return s.CreateTable(ctx, bucketTable, createTableOptions()...) }); err != nil { - glog.Errorf("failed to create bucket table %s: %v", bucketTable, err) + log.Errorf("failed to create bucket table %s: %v", bucketTable, err) } } } diff --git a/weed/filer_client/filer_client_accessor.go b/weed/filer_client/filer_client_accessor.go index 20646d343..228e01c5c 100644 --- a/weed/filer_client/filer_client_accessor.go +++ b/weed/filer_client/filer_client_accessor.go @@ -1,7 +1,7 @@ package filer_client import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -20,7 +20,7 @@ func (fca *FilerClientAccessor) WithFilerClient(streamingMode bool, fn func(file func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb.ConfigureTopicResponse) error { - glog.V(0).Infof("save conf for topic %v to filer", t) + log.V(3).Infof("save conf for topic %v to filer", t) // save the topic configuration on filer return fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { @@ -30,7 +30,7 @@ func (fca *FilerClientAccessor) SaveTopicConfToFiler(t topic.Topic, conf *mq_pb. func (fca *FilerClientAccessor) ReadTopicConfFromFiler(t topic.Topic) (conf *mq_pb.ConfigureTopicResponse, err error) { - glog.V(1).Infof("load conf for topic %v from filer", t) + log.V(2).Infof("load conf for topic %v from filer", t) if err = fca.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { conf, err = t.ReadConfFile(client) diff --git a/weed/glog/glog.go b/weed/glog/glog.go index 754c3ac36..069d6b292 100644 --- a/weed/glog/glog.go +++ b/weed/glog/glog.go @@ -20,17 +20,17 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// log.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// log.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if log.V(1) { +// log.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// log.V(1).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. 
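The doc-comment hunk above captures the call-site convention this migration targets. As a minimal sketch — assuming only what the replacements in this diff themselves show, namely that weed/util/log mirrors glog's Info/Infof/Infoln, Errorf, Warningf, Fatalf and V(level) entry points, with verbosity levels remapped rather than kept (old 0 → new 3, 1 → 2, 2 → 1, 3 → 0, 4 and above → -1, so a larger level now means a more important message):

    package main

    import "github.com/seaweedfs/seaweedfs/weed/util/log"

    func main() {
        peer := "filer-1:8888" // hypothetical value, for illustration only

        // Old: glog.V(0).Infof("synced with %s", peer) — routine progress now sits at the top tier.
        log.V(3).Infof("synced with %s", peer)

        // Old: glog.V(4).Infof("updateOffset %s : %d", ...) — trace detail now sits at the bottom tier.
        log.V(-1).Infof("updateOffset %s : %d", peer, 42)

        // Guarded form from the doc comment above: cheaper when the level is off,
        // because the arguments are not evaluated.
        if log.V(1) {
            log.Info("Starting transaction...")
        }

        // Non-verbose severities keep their names; only the import path changes.
        log.Errorf("sync with %s: example error", peer)
    }
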
@@ -740,7 +740,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when log.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -989,11 +989,11 @@ type Verbose bool // and Infof. These methods will write to the Info log if called. // Thus, one may write either // -// if glog.V(2) { glog.Info("log this") } +// if log.V(1) { log.Info("log this") } // // or // -// glog.V(2).Info("log this") +// log.V(1).Info("log this") // // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go index c8eac8ef6..4dee419aa 100644 --- a/weed/iamapi/iamapi_handlers.go +++ b/weed/iamapi/iamapi_handlers.go @@ -4,7 +4,7 @@ import ( "net/http" "github.com/aws/aws-sdk-go/service/iam" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" ) @@ -20,13 +20,13 @@ func writeIamErrorResponse(w http.ResponseWriter, r *http.Request, iamError *Iam if iamError == nil { // Do nothing if there is no error - glog.Errorf("No error found") + log.Errorf("No error found") return } errCode := iamError.Code errMsg := iamError.Error.Error() - glog.Errorf("Response %+v", errMsg) + log.Errorf("Response %+v", errMsg) errorResp := newErrorResponse(errCode, errMsg) internalErrorResponse := newErrorResponse(iam.ErrCodeServiceFailureException, "Internal server error") diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go index 094ca2332..70d08864b 100644 --- a/weed/iamapi/iamapi_management_handlers.go +++ b/weed/iamapi/iamapi_management_handlers.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" @@ -245,7 +245,7 @@ func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values return PutUserPolicyResponse{}, &IamError{Code: iam.ErrCodeMalformedPolicyDocumentException, Error: err} } // Log the actions - glog.V(3).Infof("PutUserPolicy: actions=%v", actions) + log.V(0).Infof("PutUserPolicy: actions=%v", actions) for _, ident := range s3cfg.Identities { if userName != ident.Name { continue @@ -332,14 +332,14 @@ func GetActions(policy *PolicyDocument) ([]string, error) { // Parse "arn:aws:s3:::my-bucket/shared/*" res := strings.Split(resource, ":") if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" { - glog.Infof("not a valid resource: %s", res) + log.Infof("not a valid resource: %s", res) continue } for _, action := range statement.Action { // Parse "s3:Get*" act := strings.Split(action, ":") if len(act) != 2 || act[0] != "s3" { - glog.Infof("not a valid action: %s", act) + log.Infof("not a valid action: %s", act) continue } statementAction := MapToStatementAction(act[1]) @@ -423,7 +423,7 @@ func handleImplicitUsername(r *http.Request, values url.Values) { // "AWS4-HMAC-SHA256 Credential=197FSAQ7HHTA48X64O3A/20220420/test1/iam/aws4_request, 
SignedHeaders=content-type; // host;x-amz-date, Signature=6757dc6b3d7534d67e17842760310e99ee695408497f6edc4fdb84770c252dc8", // the "test1" will be extracted as the username - glog.V(4).Infof("Authorization field: %v", r.Header["Authorization"][0]) + log.V(-1).Infof("Authorization field: %v", r.Header["Authorization"][0]) s := strings.Split(r.Header["Authorization"][0], "Credential=") if len(s) < 2 { return @@ -452,7 +452,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { return } - glog.V(4).Infof("DoActions: %+v", values) + log.V(-1).Infof("DoActions: %+v", values) var response interface{} var iamError *IamError changed := true @@ -477,7 +477,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { case "UpdateUser": response, iamError = iama.UpdateUser(s3cfg, values) if iamError != nil { - glog.Errorf("UpdateUser: %+v", iamError.Error) + log.Errorf("UpdateUser: %+v", iamError.Error) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } @@ -497,7 +497,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { case "CreatePolicy": response, iamError = iama.CreatePolicy(s3cfg, values) if iamError != nil { - glog.Errorf("CreatePolicy: %+v", iamError.Error) + log.Errorf("CreatePolicy: %+v", iamError.Error) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } @@ -505,7 +505,7 @@ func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) { var iamError *IamError response, iamError = iama.PutUserPolicy(s3cfg, values) if iamError != nil { - glog.Errorf("PutUserPolicy: %+v", iamError.Error) + log.Errorf("PutUserPolicy: %+v", iamError.Error) writeIamErrorResponse(w, r, iamError) return diff --git a/weed/images/cropping.go b/weed/images/cropping.go index 8f9525d1a..e309fcfe7 100644 --- a/weed/images/cropping.go +++ b/weed/images/cropping.go @@ -10,13 +10,13 @@ import ( "github.com/cognusion/imaging" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.ReadSeeker, err error) { srcImage, _, err := image.Decode(read) if err != nil { - glog.Error(err) + log.Error(err) return read, err } @@ -32,15 +32,15 @@ func Cropped(ext string, read io.ReadSeeker, x1, y1, x2, y2 int) (cropped io.Rea switch ext { case ".jpg", ".jpeg": if err = jpeg.Encode(&buf, dstImage, nil); err != nil { - glog.Error(err) + log.Error(err) } case ".png": if err = png.Encode(&buf, dstImage); err != nil { - glog.Error(err) + log.Error(err) } case ".gif": if err = gif.Encode(&buf, dstImage, nil); err != nil { - glog.Error(err) + log.Error(err) } } return bytes.NewReader(buf.Bytes()), err diff --git a/weed/images/resizing.go b/weed/images/resizing.go index aee096cfb..845492f98 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -10,7 +10,7 @@ import ( "github.com/cognusion/imaging" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" _ "golang.org/x/image/webp" ) @@ -55,7 +55,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re } return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy() } else { - glog.Error(err) + log.Error(err) } return read, 0, 0 } diff --git a/weed/mount/dirty_pages_chunked.go b/weed/mount/dirty_pages_chunked.go index 25b071e7d..f178023ce 100644 --- a/weed/mount/dirty_pages_chunked.go +++ b/weed/mount/dirty_pages_chunked.go @@ -5,7 +5,7 @@ import ( "io" "sync" - 
"github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount/page_writer" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -41,7 +41,7 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages { func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { pages.hasWrites = true - glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data))) + log.V(-1).Infof("%v memory AddPage [%d, %d)", pages.fh.fh, offset, offset+int64(len(data))) pages.uploadPipeline.SaveDataAt(data, offset, isSequential, tsNs) return @@ -73,13 +73,13 @@ func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reade fileName := fileFullPath.Name() chunk, err := pages.fh.wfs.saveDataAsChunk(fileFullPath)(reader, fileName, offset, modifiedTsNs) if err != nil { - glog.V(0).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err) + log.V(3).Infof("%v saveToStorage [%d,%d): %v", fileFullPath, offset, offset+size, err) pages.lastErr = err return } pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) pages.fh.entryChunkGroup.AddChunk(chunk) - glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) + log.V(0).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) } diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go index f47d4a877..ba4703665 100644 --- a/weed/mount/filehandle.go +++ b/weed/mount/filehandle.go @@ -2,7 +2,7 @@ package mount import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "os" @@ -77,10 +77,10 @@ func (fh *FileHandle) SetEntry(entry *filer_pb.Entry) { var resolveManifestErr error fh.entryChunkGroup, resolveManifestErr = filer.NewChunkGroup(fh.wfs.LookupFn(), fh.wfs.chunkCache, entry.Chunks) if resolveManifestErr != nil { - glog.Warningf("failed to resolve manifest chunks in %+v", entry) + log.Warningf("failed to resolve manifest chunks in %+v", entry) } } else { - glog.Fatalf("setting file handle entry to nil") + log.Fatalf("setting file handle entry to nil") } fh.entry.SetEntry(entry) } diff --git a/weed/mount/filehandle_read.go b/weed/mount/filehandle_read.go index ce5f96341..c455b3624 100644 --- a/weed/mount/filehandle_read.go +++ b/weed/mount/filehandle_read.go @@ -6,7 +6,7 @@ import ( "io" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -31,10 +31,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, e entry := fh.GetEntry() if entry.IsInRemoteOnly() { - glog.V(4).Infof("download remote entry %s", fileFullPath) + log.V(-1).Infof("download remote entry %s", fileFullPath) err := fh.downloadRemoteEntry(entry) if err != nil { - glog.V(1).Infof("download remote entry %s: %v", fileFullPath, err) + log.V(2).Infof("download remote entry %s: %v", fileFullPath, err) return 0, 0, err } } @@ -45,28 +45,28 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, int64, e } if fileSize == 0 { - glog.V(1).Infof("empty fh %v", fileFullPath) + log.V(2).Infof("empty fh %v", fileFullPath) return 0, 0, io.EOF } else if offset == fileSize { return 0, 0, io.EOF } 
else if offset >= fileSize { - glog.V(1).Infof("invalid read, fileSize %d, offset %d for %s", fileSize, offset, fileFullPath) + log.V(2).Infof("invalid read, fileSize %d, offset %d for %s", fileSize, offset, fileFullPath) return 0, 0, io.EOF } if offset < int64(len(entry.Content)) { totalRead := copy(buff, entry.Content[offset:]) - glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) + log.V(-1).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) return int64(totalRead), 0, nil } totalRead, ts, err := fh.entryChunkGroup.ReadDataAt(fileSize, buff, offset) if err != nil && err != io.EOF { - glog.Errorf("file handle read %s: %v", fileFullPath, err) + log.Errorf("file handle read %s: %v", fileFullPath, err) } - // glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) + // log.V(-1).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) return int64(totalRead), ts, err } @@ -83,7 +83,7 @@ func (fh *FileHandle) downloadRemoteEntry(entry *LockedEntry) error { Name: entry.Name, } - glog.V(4).Infof("download entry: %v", request) + log.V(-1).Infof("download entry: %v", request) resp, err := client.CacheRemoteObjectToLocalCluster(context.Background(), request) if err != nil { return fmt.Errorf("CacheRemoteObjectToLocalCluster file %s: %v", fileFullPath, err) diff --git a/weed/mount/filer_conf.go b/weed/mount/filer_conf.go index 3c71bb9ce..de383e660 100644 --- a/weed/mount/filer_conf.go +++ b/weed/mount/filer_conf.go @@ -7,7 +7,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -38,7 +38,7 @@ func (wfs *WFS) subscribeFilerConfEvents() (*meta_cache.MetadataFollower, error) }) if err != nil { if errors.Is(err, filer_pb.ErrNotFound) { - glog.V(0).Infof("fuse filer conf %s not found", confFullName) + log.V(3).Infof("fuse filer conf %s not found", confFullName) } else { return nil, err } diff --git a/weed/mount/inode_to_path.go b/weed/mount/inode_to_path.go index da38750d1..a21dd331c 100644 --- a/weed/mount/inode_to_path.go +++ b/weed/mount/inode_to_path.go @@ -2,7 +2,7 @@ package mount import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "sync" "time" @@ -126,7 +126,7 @@ func (i *InodeToPath) GetInode(path util.FullPath) (uint64, bool) { defer i.Unlock() inode, found := i.path2inode[path] if !found { - // glog.Fatalf("GetInode unknown inode for %s", path) + // log.Fatalf("GetInode unknown inode for %s", path) // this could be the parent for mount point } return inode, found @@ -155,8 +155,8 @@ func (i *InodeToPath) MarkChildrenCached(fullpath util.FullPath) { inode, found := i.path2inode[fullpath] if !found { // https://github.com/seaweedfs/seaweedfs/issues/4968 - // glog.Fatalf("MarkChildrenCached not found inode %v", fullpath) - glog.Warningf("MarkChildrenCached not found inode %v", fullpath) + // log.Fatalf("MarkChildrenCached not found inode %v", fullpath) + log.Warningf("MarkChildrenCached not found inode %v", fullpath) return } path, found := i.inode2path[inode] @@ -263,7 +263,7 @@ func (i 
*InodeToPath) MovePath(sourcePath, targetPath util.FullPath) (sourceInod entry.nlookup++ } } else { - glog.Errorf("MovePath %s to %s: sourceInode %d not found", sourcePath, targetPath, sourceInode) + log.Errorf("MovePath %s to %s: sourceInode %d not found", sourcePath, targetPath, sourceInode) } return } diff --git a/weed/mount/meta_cache/meta_cache.go b/weed/mount/meta_cache/meta_cache.go index 0f0b1de30..99f6546fb 100644 --- a/weed/mount/meta_cache/meta_cache.go +++ b/weed/mount/meta_cache/meta_cache.go @@ -8,7 +8,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/filer" "github.com/seaweedfs/seaweedfs/weed/filer/leveldb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -51,7 +51,7 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore { } if err := store.Initialize(config, ""); err != nil { - glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err) + log.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err) } return filer.NewFilerStoreWrapper(store) @@ -74,7 +74,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti entry, err := mc.localStore.FindEntry(ctx, oldPath) if err != nil && err != filer_pb.ErrNotFound { - glog.Errorf("Metacache: find entry error: %v", err) + log.Errorf("Metacache: find entry error: %v", err) return err } if entry != nil { @@ -84,7 +84,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti // leave the update to the following InsertEntry operation } else { ctx = context.WithValue(ctx, "OP", "MV") - glog.V(3).Infof("DeleteEntry %s", oldPath) + log.V(0).Infof("DeleteEntry %s", oldPath) if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { return err } @@ -97,7 +97,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti if newEntry != nil { newDir, _ := newEntry.DirAndName() if mc.isCachedFn(util.FullPath(newDir)) { - glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name()) + log.V(0).Infof("InsertEntry %s/%s", newDir, newEntry.Name()) if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil { return err } @@ -143,7 +143,7 @@ func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.Full if !mc.isCachedFn(dirPath) { // if this request comes after renaming, it should be fine - glog.Warningf("unsynchronized dir: %v", dirPath) + log.Warningf("unsynchronized dir: %v", dirPath) } _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool { diff --git a/weed/mount/meta_cache/meta_cache_init.go b/weed/mount/meta_cache/meta_cache_init.go index 1cab499e0..01953af8d 100644 --- a/weed/mount/meta_cache/meta_cache_init.go +++ b/weed/mount/meta_cache/meta_cache_init.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -41,7 +41,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full func doEnsureVisited(mc *MetaCache, client filer_pb.FilerClient, path util.FullPath) error { - glog.V(4).Infof("ReadDirAllEntries %s ...", path) + log.V(-1).Infof("ReadDirAllEntries %s ...", path) err := util.Retry("ReadDirAllEntries", func() 
error { return filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error { @@ -50,7 +50,7 @@ func doEnsureVisited(mc *MetaCache, client filer_pb.FilerClient, path util.FullP return nil } if err := mc.doInsertEntry(context.Background(), entry); err != nil { - glog.V(0).Infof("read %s: %v", entry.FullPath, err) + log.V(3).Infof("read %s: %v", entry.FullPath, err) return err } return nil diff --git a/weed/mount/meta_cache/meta_cache_subscribe.go b/weed/mount/meta_cache/meta_cache_subscribe.go index 9a4553013..4e37a4753 100644 --- a/weed/mount/meta_cache/meta_cache_subscribe.go +++ b/weed/mount/meta_cache/meta_cache_subscribe.go @@ -3,7 +3,7 @@ package meta_cache import ( "context" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -63,7 +63,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil var newEntry *filer.Entry if message.OldEntry != nil { oldPath = util.NewFullPath(dir, message.OldEntry.Name) - glog.V(4).Infof("deleting %v", oldPath) + log.V(-1).Infof("deleting %v", oldPath) } if message.NewEntry != nil { @@ -71,7 +71,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil dir = message.NewParentPath } key := util.NewFullPath(dir, message.NewEntry.Name) - glog.V(4).Infof("creating %v", key) + log.V(-1).Infof("creating %v", key) newEntry = filer.FromPbEntry(dir, message.NewEntry) } err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry) @@ -116,7 +116,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil metadataFollowOption.ClientEpoch++ return pb.WithFilerClientFollowMetadata(client, metadataFollowOption, mergeProcessors(processEventFn, followers...)) }, func(err error) bool { - glog.Errorf("follow metadata updates: %v", err) + log.Errorf("follow metadata updates: %v", err) return true }) diff --git a/weed/mount/page_writer.go b/weed/mount/page_writer.go index 58ae03cda..0af6b5a6e 100644 --- a/weed/mount/page_writer.go +++ b/weed/mount/page_writer.go @@ -1,7 +1,7 @@ package mount import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount/page_writer" ) @@ -31,7 +31,7 @@ func newPageWriter(fh *FileHandle, chunkSize int64) *PageWriter { func (pw *PageWriter) AddPage(offset int64, data []byte, isSequential bool, tsNs int64) { - glog.V(4).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) + log.V(-1).Infof("%v AddPage [%d, %d)", pw.fh.fh, offset, offset+int64(len(data))) chunkIndex := offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { @@ -51,7 +51,7 @@ func (pw *PageWriter) FlushData() error { } func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64, tsNs int64) (maxStop int64) { - glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.inode, offset, offset+int64(len(data))) + log.V(-1).Infof("ReadDirtyDataAt %v [%d, %d)", pw.fh.inode, offset, offset+int64(len(data))) chunkIndex := offset / pw.chunkSize for i := chunkIndex; len(data) > 0; i++ { diff --git a/weed/mount/page_writer/page_chunk_swapfile.go b/weed/mount/page_writer/page_chunk_swapfile.go index dd9781b68..bd31d9221 100644 --- a/weed/mount/page_writer/page_chunk_swapfile.go +++ b/weed/mount/page_writer/page_chunk_swapfile.go @@ -1,7 
+1,7 @@ package page_writer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util/mem" "io" @@ -53,7 +53,7 @@ func (sf *SwapFile) NewSwapFileChunk(logicChunkIndex LogicChunkIndex) (tc *SwapF var err error sf.file, err = os.CreateTemp(sf.dir, "") if err != nil { - glog.Errorf("create swap file: %v", err) + log.Errorf("create swap file: %v", err) return nil } } @@ -108,7 +108,7 @@ func (sc *SwapFileChunk) WriteDataAt(src []byte, offset int64, tsNs int64) (n in n, err = sc.swapfile.file.WriteAt(src, int64(sc.actualChunkIndex)*sc.swapfile.chunkSize+innerOffset) sc.usage.MarkWritten(innerOffset, innerOffset+int64(n), tsNs) if err != nil { - glog.Errorf("failed to write swap file %s: %v", sc.swapfile.file.Name(), err) + log.Errorf("failed to write swap file %s: %v", sc.swapfile.file.Name(), err) } //sc.memChunk.WriteDataAt(src, offset, tsNs) sc.activityScore.MarkWrite() @@ -135,7 +135,7 @@ func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop in if err == io.EOF && n == int(logicStop-logicStart) { err = nil } - glog.Errorf("failed to reading swap file %s: %v", sc.swapfile.file.Name(), err) + log.Errorf("failed to read swap file %s: %v", sc.swapfile.file.Name(), err) break } maxStop = max(maxStop, logicStop) diff --git a/weed/mount/page_writer/upload_pipeline.go b/weed/mount/page_writer/upload_pipeline.go index bd7fc99dd..acbee2802 100644 --- a/weed/mount/page_writer/upload_pipeline.go +++ b/weed/mount/page_writer/upload_pipeline.go @@ -2,7 +2,7 @@ package page_writer import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "sync" "sync/atomic" @@ -34,7 +34,7 @@ type SealedChunk struct { func (sc *SealedChunk) FreeReference(messageOnFree string) { sc.referenceCounter-- if sc.referenceCounter == 0 { - glog.V(4).Infof("Free sealed chunk: %s", messageOnFree) + log.V(-1).Infof("Free sealed chunk: %s", messageOnFree) sc.chunk.FreeResource() } } @@ -131,7 +131,7 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxS sealedChunk, found := up.sealedChunks[logicChunkIndex] if found { maxStop = sealedChunk.chunk.ReadDataAt(p, off, tsNs) - glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop) + log.V(-1).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop) } // read from writable chunks last @@ -140,7 +140,7 @@ func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxS return } writableMaxStop := writableChunk.ReadDataAt(p, off, tsNs) - glog.V(4).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop) + log.V(-1).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop) maxStop = max(maxStop, writableMaxStop) return @@ -168,7 +168,7 @@ func (up *UploadPipeline) maybeMoveToSealed(memChunk PageChunk, logicChunkIndex func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) { atomic.AddInt32(&up.uploaderCount, 1) - glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount) + log.V(-1).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount) if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found { oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex)) @@ -188,7 +188,7 @@ func (up 
*UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex Logic // notify waiting process atomic.AddInt32(&up.uploaderCount, -1) - glog.V(4).Infof("%s uploaderCount %d --> %d", up.filepath, up.uploaderCount+1, up.uploaderCount) + log.V(-1).Infof("%s uploaderCount %d --> %d", up.filepath, up.uploaderCount+1, up.uploaderCount) // Lock and Unlock are not required, // but it may signal multiple times during one wakeup, // and the waiting goroutine may miss some of them! diff --git a/weed/mount/weedfs.go b/weed/mount/weedfs.go index 77ffb7e77..89a9c0617 100644 --- a/weed/mount/weedfs.go +++ b/weed/mount/weedfs.go @@ -183,7 +183,7 @@ func (wfs *WFS) maybeReadEntry(inode uint64) (path util.FullPath, fh *FileHandle func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.Status) { - // glog.V(3).Infof("read entry cache miss %s", fullpath) + // log.V(0).Infof("read entry cache miss %s", fullpath) dir, name := fullpath.DirAndName() // return a valid entry for the mount root diff --git a/weed/mount/weedfs_attr.go b/weed/mount/weedfs_attr.go index 0bd5771cd..92bf8c3af 100644 --- a/weed/mount/weedfs_attr.go +++ b/weed/mount/weedfs_attr.go @@ -7,12 +7,12 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) func (wfs *WFS) GetAttr(cancel <-chan struct{}, input *fuse.GetAttrIn, out *fuse.AttrOut) (code fuse.Status) { - glog.V(4).Infof("GetAttr %v", input.NodeId) + log.V(-1).Infof("GetAttr %v", input.NodeId) if input.NodeId == 1 { wfs.setRootAttr(out) return fuse.OK @@ -57,7 +57,7 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse } if size, ok := input.GetSize(); ok { - glog.V(4).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.GetChunks())) + log.V(-1).Infof("%v setattr set size=%v chunks=%d", path, size, len(entry.GetChunks())) if size < filer.FileSize(entry) { // fmt.Printf("truncate %v \n", fullPath) var chunks []*filer_pb.FileChunk @@ -69,10 +69,10 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse int64Size = int64(size) - chunk.Offset if int64Size > 0 { chunks = append(chunks, chunk) - glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size) + log.V(-1).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size) chunk.Size = uint64(int64Size) } else { - glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString()) + log.V(-1).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString()) truncatedChunks = append(truncatedChunks, chunk) } } else { @@ -96,7 +96,7 @@ func (wfs *WFS) SetAttr(cancel <-chan struct{}, input *fuse.SetAttrIn, out *fuse entry.WormEnforcedAtTsNs = time.Now().UnixNano() } - // glog.V(4).Infof("setAttr mode %o", mode) + // log.V(-1).Infof("setAttr mode %o", mode) entry.Attributes.FileMode = chmod(entry.Attributes.FileMode, mode) if input.NodeId == 1 { wfs.option.MountMode = os.FileMode(chmod(uint32(wfs.option.MountMode), mode)) diff --git a/weed/mount/weedfs_dir_lookup.go b/weed/mount/weedfs_dir_lookup.go index 7fc10ef28..55750d409 100644 --- a/weed/mount/weedfs_dir_lookup.go +++ b/weed/mount/weedfs_dir_lookup.go @@ -6,7 +6,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + 
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -31,7 +31,7 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin visitErr := meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath) if visitErr != nil { - glog.Errorf("dir Lookup %s: %v", dirPath, visitErr) + log.Errorf("dir Lookup %s: %v", dirPath, visitErr) return fuse.EIO } localEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullFilePath) @@ -40,15 +40,15 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin } if localEntry == nil { - // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + // log.V(0).Infof("dir Lookup cache miss %s", fullFilePath) entry, err := filer_pb.GetEntry(wfs, fullFilePath) if err != nil { - glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + log.V(2).Infof("dir GetEntry %s: %v", fullFilePath, err) return fuse.ENOENT } localEntry = filer.FromPbEntry(string(dirPath), entry) } else { - glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) + log.V(-1).Infof("dir Lookup cache hit %s", fullFilePath) } if localEntry == nil { @@ -60,7 +60,7 @@ func (wfs *WFS) Lookup(cancel <-chan struct{}, header *fuse.InHeader, name strin if fh, found := wfs.fhMap.FindFileHandle(inode); found { fh.entryLock.RLock() if entry := fh.GetEntry().GetEntry(); entry != nil { - glog.V(4).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(entry)) + log.V(-1).Infof("lookup opened file %s size %d", dirPath.Child(localEntry.Name()), filer.FileSize(entry)) localEntry = filer.FromPbEntry(string(dirPath), entry) } fh.entryLock.RUnlock() diff --git a/weed/mount/weedfs_dir_mkrm.go b/weed/mount/weedfs_dir_mkrm.go index e69c9796e..612ea97a0 100644 --- a/weed/mount/weedfs_dir_mkrm.go +++ b/weed/mount/weedfs_dir_mkrm.go @@ -11,7 +11,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -62,9 +62,9 @@ func (wfs *WFS) Mkdir(cancel <-chan struct{}, in *fuse.MkdirIn, name string, out SkipCheckParentDirectory: true, } - glog.V(1).Infof("mkdir: %v", request) + log.V(2).Infof("mkdir: %v", request) if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("mkdir %s: %v", entryFullPath, err) + log.V(3).Infof("mkdir %s: %v", entryFullPath, err) return err } @@ -75,7 +75,7 @@ func (wfs *WFS) Mkdir(cancel <-chan struct{}, in *fuse.MkdirIn, name string, out return nil }) - glog.V(3).Infof("mkdir %s: %v", entryFullPath, err) + log.V(0).Infof("mkdir %s: %v", entryFullPath, err) if err != nil { return fuse.EIO @@ -105,11 +105,11 @@ func (wfs *WFS) Rmdir(cancel <-chan struct{}, header *fuse.InHeader, name string } entryFullPath := dirFullPath.Child(name) - glog.V(3).Infof("remove directory: %v", entryFullPath) + log.V(0).Infof("remove directory: %v", entryFullPath) ignoreRecursiveErr := true // ignore recursion error since the OS should manage it err := filer_pb.Remove(wfs, string(dirFullPath), name, true, false, ignoreRecursiveErr, false, []int32{wfs.signature}) if err != nil { - glog.V(0).Infof("remove %s: %v", entryFullPath, err) + log.V(3).Infof("remove %s: %v", entryFullPath, err) if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { return fuse.Status(syscall.ENOTEMPTY) } diff --git 
a/weed/mount/weedfs_dir_read.go b/weed/mount/weedfs_dir_read.go index 6e18b50e8..cada1ecac 100644 --- a/weed/mount/weedfs_dir_read.go +++ b/weed/mount/weedfs_dir_read.go @@ -4,7 +4,7 @@ import ( "context" "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mount/meta_cache" "github.com/seaweedfs/seaweedfs/weed/util" "math" @@ -170,7 +170,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl return false } if fh, found := wfs.fhMap.FindFileHandle(inode); found { - glog.V(4).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name)) + log.V(-1).Infof("readdir opened file %s", dirPath.Child(dirEntry.Name)) entry = filer.FromPbEntry(string(dirPath), fh.GetEntry().GetEntry()) } wfs.outputFilerEntry(entryOut, inode, entry) @@ -218,7 +218,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl var err error if err = meta_cache.EnsureVisited(wfs.metaCache, wfs, dirPath); err != nil { - glog.Errorf("dir ReadDirAll %s: %v", dirPath, err) + log.Errorf("dir ReadDirAll %s: %v", dirPath, err) return fuse.EIO } listErr := wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, lastEntryName, false, int64(math.MaxInt32), func(entry *filer.Entry) bool { @@ -226,7 +226,7 @@ func (wfs *WFS) doReadDirectory(input *fuse.ReadIn, out *fuse.DirEntryList, isPl return processEachEntryFn(entry) }) if listErr != nil { - glog.Errorf("list meta cache: %v", listErr) + log.Errorf("list meta cache: %v", listErr) return fuse.EIO } diff --git a/weed/mount/weedfs_file_copy_range.go b/weed/mount/weedfs_file_copy_range.go index 43ec289ab..5bb5f4136 100644 --- a/weed/mount/weedfs_file_copy_range.go +++ b/weed/mount/weedfs_file_copy_range.go @@ -7,7 +7,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) // CopyFileRange copies data from one file to another from and to specified offsets. 
@@ -62,7 +62,7 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) return 0, fuse.EISDIR } - glog.V(4).Infof( + log.V(-1).Infof( "CopyFileRange %s fhIn %d -> %s fhOut %d, [%d,%d) -> [%d,%d)", fhIn.FullPath(), fhIn.fh, fhOut.FullPath(), fhOut.fh, @@ -73,7 +73,7 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn) data := make([]byte, in.Len) totalRead, err := readDataByFileHandle(data, fhIn, int64(in.OffIn)) if err != nil { - glog.Warningf("file handle read %s %d: %v", fhIn.FullPath(), totalRead, err) + log.Warningf("file handle read %s %d: %v", fhIn.FullPath(), totalRead, err) return 0, fuse.EIO } data = data[:totalRead] diff --git a/weed/mount/weedfs_file_io.go b/weed/mount/weedfs_file_io.go index 04fe7f21c..e641ab5a4 100644 --- a/weed/mount/weedfs_file_io.go +++ b/weed/mount/weedfs_file_io.go @@ -2,7 +2,7 @@ package mount import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) /** @@ -71,7 +71,7 @@ func (wfs *WFS) Open(cancel <-chan struct{}, in *fuse.OpenIn, out *fuse.OpenOut) // remove the direct_io flag, as it is not well-supported on macOS // https://code.google.com/archive/p/macfuse/wikis/OPTIONS.wiki recommended to avoid the direct_io flag if in.Flags&fuse.FOPEN_DIRECT_IO != 0 { - glog.V(4).Infof("macfuse direct_io mode %v => false\n", in.Flags&fuse.FOPEN_DIRECT_IO != 0) + log.V(-1).Infof("macfuse direct_io mode %v => false\n", in.Flags&fuse.FOPEN_DIRECT_IO != 0) out.OpenFlags &^= fuse.FOPEN_DIRECT_IO } } diff --git a/weed/mount/weedfs_file_lseek.go b/weed/mount/weedfs_file_lseek.go index 0cf7ef43b..275545f72 100644 --- a/weed/mount/weedfs_file_lseek.go +++ b/weed/mount/weedfs_file_lseek.go @@ -7,7 +7,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) // These are non-POSIX extensions @@ -42,7 +42,7 @@ func (wfs *WFS) Lseek(cancel <-chan struct{}, in *fuse.LseekIn, out *fuse.LseekO fileSize := int64(filer.FileSize(fh.GetEntry().GetEntry())) offset := max(int64(in.Offset), 0) - glog.V(4).Infof( + log.V(-1).Infof( "Lseek %s fh %d in [%d,%d], whence %d", fh.FullPath(), fh.fh, offset, fileSize, in.Whence, ) diff --git a/weed/mount/weedfs_file_mkrm.go b/weed/mount/weedfs_file_mkrm.go index e6139e88e..e6430c452 100644 --- a/weed/mount/weedfs_file_mkrm.go +++ b/weed/mount/weedfs_file_mkrm.go @@ -8,7 +8,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -82,9 +82,9 @@ func (wfs *WFS) Mknod(cancel <-chan struct{}, in *fuse.MknodIn, name string, out SkipCheckParentDirectory: true, } - glog.V(1).Infof("mknod: %v", request) + log.V(2).Infof("mknod: %v", request) if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("mknod %s: %v", entryFullPath, err) + log.V(3).Infof("mknod %s: %v", entryFullPath, err) return err } @@ -95,7 +95,7 @@ func (wfs *WFS) Mknod(cancel <-chan struct{}, in *fuse.MknodIn, name string, out return nil }) - glog.V(3).Infof("mknod %s: %v", entryFullPath, err) + log.V(0).Infof("mknod %s: %v", entryFullPath, err) if err != nil { return fuse.EIO @@ -135,17 +135,17 @@ func (wfs *WFS) Unlink(cancel <-chan struct{}, header *fuse.InHeader, name strin } // first, ensure the filer store 
can correctly delete - glog.V(3).Infof("remove file: %v", entryFullPath) + log.V(0).Infof("remove file: %v", entryFullPath) isDeleteData := entry != nil && entry.HardLinkCounter <= 1 err := filer_pb.Remove(wfs, string(dirFullPath), name, isDeleteData, false, false, false, []int32{wfs.signature}) if err != nil { - glog.V(0).Infof("remove %s: %v", entryFullPath, err) + log.V(3).Infof("remove %s: %v", entryFullPath, err) return fuse.OK } // then, delete meta cache if err = wfs.metaCache.DeleteEntry(context.Background(), entryFullPath); err != nil { - glog.V(3).Infof("local DeleteEntry %s: %v", entryFullPath, err) + log.V(0).Infof("local DeleteEntry %s: %v", entryFullPath, err) return fuse.EIO } diff --git a/weed/mount/weedfs_file_read.go b/weed/mount/weedfs_file_read.go index bf9c89071..8b39d1fc9 100644 --- a/weed/mount/weedfs_file_read.go +++ b/weed/mount/weedfs_file_read.go @@ -8,7 +8,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) /** @@ -48,7 +48,7 @@ func (wfs *WFS) Read(cancel <-chan struct{}, in *fuse.ReadIn, buff []byte) (fuse offset := int64(in.Offset) totalRead, err := readDataByFileHandle(buff, fh, offset) if err != nil { - glog.Warningf("file handle read %s %d: %v", fh.FullPath(), totalRead, err) + log.Warningf("file handle read %s %d: %v", fh.FullPath(), totalRead, err) return nil, fuse.EIO } diff --git a/weed/mount/weedfs_file_sync.go b/weed/mount/weedfs_file_sync.go index e13ab198d..0eabe3216 100644 --- a/weed/mount/weedfs_file_sync.go +++ b/weed/mount/weedfs_file_sync.go @@ -8,7 +8,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -98,11 +98,11 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { fileFullPath := fh.FullPath() dir, name := fileFullPath.DirAndName() // send the data to the OS - glog.V(4).Infof("doFlush %s fh %d", fileFullPath, fh.fh) + log.V(-1).Infof("doFlush %s fh %d", fileFullPath, fh.fh) if !wfs.IsOverQuota { if err := fh.dirtyPages.FlushData(); err != nil { - glog.Errorf("%v doFlush: %v", fileFullPath, err) + log.Errorf("%v doFlush: %v", fileFullPath, err) return fuse.EIO } } @@ -141,9 +141,9 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { SkipCheckParentDirectory: true, } - glog.V(4).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks())) + log.V(-1).Infof("%s set chunks: %v", fileFullPath, len(entry.GetChunks())) //for i, chunk := range entry.GetChunks() { - // glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) + // log.V(-1).Infof("%s chunks %d: %v [%d,%d)", fileFullPath, i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) //} manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.GetChunks()) @@ -152,7 +152,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { chunks, manifestErr := filer.MaybeManifestize(wfs.saveDataAsChunk(fileFullPath), chunks) if manifestErr != nil { // not good, but should be ok - glog.V(0).Infof("MaybeManifestize: %v", manifestErr) + log.V(3).Infof("MaybeManifestize: %v", manifestErr) } entry.Chunks = append(chunks, manifestChunks...) 
@@ -160,7 +160,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { defer wfs.mapPbIdFromFilerToLocal(request.Entry) if err := filer_pb.CreateEntry(client, request); err != nil { - glog.Errorf("fh flush create %s: %v", fileFullPath, err) + log.Errorf("fh flush create %s: %v", fileFullPath, err) return fmt.Errorf("fh flush create %s: %v", fileFullPath, err) } @@ -174,7 +174,7 @@ func (wfs *WFS) doFlush(fh *FileHandle, uid, gid uint32) fuse.Status { } if err != nil { - glog.Errorf("%v fh %d flush: %v", fileFullPath, fh.fh, err) + log.Errorf("%v fh %d flush: %v", fileFullPath, fh.fh, err) return fuse.EIO } diff --git a/weed/mount/weedfs_file_write.go b/weed/mount/weedfs_file_write.go index 1ec20c294..0e72420da 100644 --- a/weed/mount/weedfs_file_write.go +++ b/weed/mount/weedfs_file_write.go @@ -60,7 +60,7 @@ func (wfs *WFS) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (wr entry.Content = nil offset := int64(in.Offset) entry.Attributes.FileSize = uint64(max(offset+int64(len(data)), int64(entry.Attributes.FileSize))) - // glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) + // log.V(-1).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) fh.dirtyPages.AddPage(offset, data, fh.dirtyPages.writerPattern.IsSequentialMode(), tsNs) diff --git a/weed/mount/weedfs_grpc_server.go b/weed/mount/weedfs_grpc_server.go index f867f2d80..245f8c70a 100644 --- a/weed/mount/weedfs_grpc_server.go +++ b/weed/mount/weedfs_grpc_server.go @@ -3,7 +3,7 @@ package mount import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/mount_pb" ) @@ -11,7 +11,7 @@ func (wfs *WFS) Configure(ctx context.Context, request *mount_pb.ConfigureReques if wfs.option.Collection == "" { return nil, fmt.Errorf("mount quota only works when mounted to a new folder with a collection") } - glog.V(0).Infof("quota changed from %d to %d", wfs.option.Quota, request.CollectionCapacity) + log.V(3).Infof("quota changed from %d to %d", wfs.option.Quota, request.CollectionCapacity) wfs.option.Quota = request.GetCollectionCapacity() return &mount_pb.ConfigureResponse{}, nil } diff --git a/weed/mount/weedfs_link.go b/weed/mount/weedfs_link.go index 8c5b67ce2..a1786ea76 100644 --- a/weed/mount/weedfs_link.go +++ b/weed/mount/weedfs_link.go @@ -8,7 +8,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -105,7 +105,7 @@ func (wfs *WFS) Link(cancel <-chan struct{}, in *fuse.LinkIn, name string, out * newEntryPath := newParentPath.Child(name) if err != nil { - glog.V(0).Infof("Link %v -> %s: %v", oldEntryPath, newEntryPath, err) + log.V(3).Infof("Link %v -> %s: %v", oldEntryPath, newEntryPath, err) return fuse.EIO } diff --git a/weed/mount/weedfs_quota.go b/weed/mount/weedfs_quota.go index 23f487549..b8a902db2 100644 --- a/weed/mount/weedfs_quota.go +++ b/weed/mount/weedfs_quota.go @@ -3,7 +3,7 @@ package mount import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "time" ) @@ -29,16 +29,16 @@ func (wfs *WFS) loopCheckQuota() { resp, err := client.Statistics(context.Background(), request) if err != nil { 
- glog.V(0).Infof("reading quota usage %v: %v", request, err) + log.V(3).Infof("reading quota usage %v: %v", request, err) return err } - glog.V(4).Infof("read quota usage: %+v", resp) + log.V(-1).Infof("read quota usage: %+v", resp) isOverQuota := int64(resp.UsedSize) > wfs.option.Quota if isOverQuota && !wfs.IsOverQuota { - glog.Warningf("Quota Exceeded! quota:%d used:%d", wfs.option.Quota, resp.UsedSize) + log.Warningf("Quota Exceeded! quota:%d used:%d", wfs.option.Quota, resp.UsedSize) } else if !isOverQuota && wfs.IsOverQuota { - glog.Warningf("Within quota limit! quota:%d used:%d", wfs.option.Quota, resp.UsedSize) + log.Warningf("Within quota limit! quota:%d used:%d", wfs.option.Quota, resp.UsedSize) } wfs.IsOverQuota = isOverQuota @@ -46,7 +46,7 @@ func (wfs *WFS) loopCheckQuota() { }) if err != nil { - glog.Warningf("read quota usage: %v", err) + log.Warningf("read quota usage: %v", err) } } diff --git a/weed/mount/weedfs_rename.go b/weed/mount/weedfs_rename.go index e567b12e1..b9d1999ac 100644 --- a/weed/mount/weedfs_rename.go +++ b/weed/mount/weedfs_rename.go @@ -10,7 +10,7 @@ import ( "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -170,7 +170,7 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string return fuse.EPERM } - glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath) + log.V(-1).Infof("dir Rename %s => %s", oldPath, newPath) // update remote filer err := wfs.WithFilerClient(true, func(client filer_pb.SeaweedFilerClient) error { @@ -207,7 +207,7 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string } if err = wfs.handleRenameResponse(ctx, resp); err != nil { - glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err) + log.V(3).Infof("dir Rename %s => %s : %v", oldPath, newPath, err) return err } @@ -217,7 +217,7 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string }) if err != nil { - glog.V(0).Infof("Link: %v", err) + log.V(3).Infof("Link: %v", err) return } @@ -228,7 +228,7 @@ func (wfs *WFS) Rename(cancel <-chan struct{}, in *fuse.RenameIn, oldName string func (wfs *WFS) handleRenameResponse(ctx context.Context, resp *filer_pb.StreamRenameEntryResponse) error { // comes from filer StreamRenameEntry, can only be create or delete entry - glog.V(4).Infof("dir Rename %+v", resp.EventNotification) + log.V(-1).Infof("dir Rename %+v", resp.EventNotification) if resp.EventNotification.NewEntry != nil { // with new entry, the old entry name also exists. 
This is the first step to create new entry diff --git a/weed/mount/weedfs_stats.go b/weed/mount/weedfs_stats.go index 28e992158..1ba5eef0b 100644 --- a/weed/mount/weedfs_stats.go +++ b/weed/mount/weedfs_stats.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/hanwen/go-fuse/v2/fuse" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "math" "time" @@ -19,7 +19,7 @@ type statsCache struct { func (wfs *WFS) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.StatfsOut) (code fuse.Status) { - // glog.V(4).Infof("reading fs stats") + // log.V(-1).Infof("reading fs stats") if wfs.stats.lastChecked < time.Now().Unix()-20 { @@ -32,13 +32,13 @@ func (wfs *WFS) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.Stat DiskType: string(wfs.option.DiskType), } - glog.V(4).Infof("reading filer stats: %+v", request) + log.V(-1).Infof("reading filer stats: %+v", request) resp, err := client.Statistics(context.Background(), request) if err != nil { - glog.V(0).Infof("reading filer stats %v: %v", request, err) + log.V(3).Infof("reading filer stats %v: %v", request, err) return err } - glog.V(4).Infof("read filer stats: %+v", resp) + log.V(-1).Infof("read filer stats: %+v", resp) wfs.stats.TotalSize = resp.TotalSize wfs.stats.UsedSize = resp.UsedSize @@ -48,7 +48,7 @@ func (wfs *WFS) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.Stat return nil }) if err != nil { - glog.V(0).Infof("filer Statistics: %v", err) + log.V(3).Infof("filer Statistics: %v", err) return fuse.OK } } diff --git a/weed/mount/weedfs_symlink.go b/weed/mount/weedfs_symlink.go index 8842ec3e6..eaf2559a2 100644 --- a/weed/mount/weedfs_symlink.go +++ b/weed/mount/weedfs_symlink.go @@ -10,7 +10,7 @@ import ( "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -62,7 +62,7 @@ func (wfs *WFS) Symlink(cancel <-chan struct{}, header *fuse.InHeader, target st return nil }) if err != nil { - glog.V(0).Infof("Symlink %s => %s: %v", entryFullPath, target, err) + log.V(3).Infof("Symlink %s => %s: %v", entryFullPath, target, err) return fuse.EIO } diff --git a/weed/mount/weedfs_write.go b/weed/mount/weedfs_write.go index 77ad01b89..938947781 100644 --- a/weed/mount/weedfs_write.go +++ b/weed/mount/weedfs_write.go @@ -5,7 +5,7 @@ import ( "io" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -48,11 +48,11 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun ) if err != nil { - glog.V(0).Infof("upload data %v: %v", filename, err) + log.V(3).Infof("upload data %v: %v", filename, err) return nil, fmt.Errorf("upload data: %v", err) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v: %v", filename, err) + log.V(3).Infof("upload failure %v: %v", filename, uploadResult.Error) return nil, fmt.Errorf("upload result: %v", uploadResult.Error) } diff --git a/weed/mount/wfs_filer_client.go b/weed/mount/wfs_filer_client.go index 5dd09363f..a6803a253 100644 --- a/weed/mount/wfs_filer_client.go +++ b/weed/mount/wfs_filer_client.go @@ -5,7 +5,7 @@ import ( "google.golang.org/grpc" -
"github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -28,7 +28,7 @@ func (wfs *WFS) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFile }, filerGrpcAddress, false, wfs.option.GrpcDialOption) if err != nil { - glog.V(0).Infof("WithFilerClient %d %v: %v", x, filerGrpcAddress, err) + log.V(3).Infof("WithFilerClient %d %v: %v", x, filerGrpcAddress, err) } else { atomic.StoreInt32(&wfs.option.filerIndex, i) return nil diff --git a/weed/mount/wfs_save.go b/weed/mount/wfs_save.go index 56ad47011..02cc24aa4 100644 --- a/weed/mount/wfs_save.go +++ b/weed/mount/wfs_save.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/hanwen/go-fuse/v2/fuse" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "syscall" @@ -26,7 +26,7 @@ func (wfs *WFS) saveEntry(path util.FullPath, entry *filer_pb.Entry) (code fuse. Signatures: []int32{wfs.signature}, } - glog.V(1).Infof("save entry: %v", request) + log.V(2).Infof("save entry: %v", request) _, err := client.UpdateEntry(context.Background(), request) if err != nil { return fmt.Errorf("UpdateEntry dir %s: %v", path, err) @@ -39,7 +39,7 @@ func (wfs *WFS) saveEntry(path util.FullPath, entry *filer_pb.Entry) (code fuse. return nil }) if err != nil { - glog.Errorf("saveEntry %s: %v", path, err) + log.Errorf("saveEntry %s: %v", path, err) return fuse.EIO } diff --git a/weed/mq/agent/agent_grpc_subscribe.go b/weed/mq/agent/agent_grpc_subscribe.go index 87baa466c..ac7cc1c6f 100644 --- a/weed/mq/agent/agent_grpc_subscribe.go +++ b/weed/mq/agent/agent_grpc_subscribe.go @@ -2,7 +2,7 @@ package agent import ( "context" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/client/sub_client" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pb" @@ -31,7 +31,7 @@ func (a *MessageQueueAgent) SubscribeRecord(stream mq_agent_pb.SeaweedMessagingA record := &schema_pb.RecordValue{} err := proto.Unmarshal(m.Data.Value, record) if err != nil { - glog.V(0).Infof("unmarshal record value: %v", err) + log.V(3).Infof("unmarshal record value: %v", err) if lastErr == nil { lastErr = err } @@ -42,7 +42,7 @@ func (a *MessageQueueAgent) SubscribeRecord(stream mq_agent_pb.SeaweedMessagingA Value: record, TsNs: m.Data.TsNs, }); sendErr != nil { - glog.V(0).Infof("send record: %v", sendErr) + log.V(3).Infof("send record: %v", sendErr) if lastErr == nil { lastErr = sendErr } @@ -53,7 +53,7 @@ func (a *MessageQueueAgent) SubscribeRecord(stream mq_agent_pb.SeaweedMessagingA go func() { subErr := subscriber.Subscribe() if subErr != nil { - glog.V(0).Infof("subscriber %s subscribe: %v", subscriber.SubscriberConfig.String(), subErr) + log.V(3).Infof("subscriber %s subscribe: %v", subscriber.SubscriberConfig.String(), subErr) if lastErr == nil { lastErr = subErr } @@ -63,7 +63,7 @@ func (a *MessageQueueAgent) SubscribeRecord(stream mq_agent_pb.SeaweedMessagingA for { m, err := stream.Recv() if err != nil { - glog.V(0).Infof("subscriber %s receive: %v", subscriber.SubscriberConfig.String(), err) + log.V(3).Infof("subscriber %s receive: %v", subscriber.SubscriberConfig.String(), err) return err } if m != nil { diff --git 
a/weed/mq/broker/broker_connect.go b/weed/mq/broker/broker_connect.go index 386d86570..8018cfe57 100644 --- a/weed/mq/broker/broker_connect.go +++ b/weed/mq/broker/broker_connect.go @@ -3,7 +3,7 @@ package broker import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "io" @@ -16,7 +16,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop self := string(b.option.BrokerAddress()) - glog.V(0).Infof("broker %s connects to balancer %s", self, brokerBalancer) + log.V(3).Infof("broker %s connects to balancer %s", self, brokerBalancer) if brokerBalancer == "" { return fmt.Errorf("no balancer found") } @@ -59,7 +59,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop } return fmt.Errorf("send stats message to balancer %s: %v", brokerBalancer, err) } - // glog.V(3).Infof("sent stats: %+v", stats) + // log.V(0).Infof("sent stats: %+v", stats) time.Sleep(time.Millisecond*5000 + time.Duration(rand.Intn(1000))*time.Millisecond) } @@ -82,7 +82,7 @@ func (b *MessageQueueBroker) KeepConnectedToBrokerBalancer(newBrokerBalancerCh c for { err := b.BrokerConnectToBalancer(newBrokerBalancer, thisRunStopChan) if err != nil { - glog.V(0).Infof("connect to balancer %s: %v", newBrokerBalancer, err) + log.V(3).Infof("connect to balancer %s: %v", newBrokerBalancer, err) time.Sleep(time.Second) } else { break diff --git a/weed/mq/broker/broker_grpc_assign.go b/weed/mq/broker/broker_grpc_assign.go index 991208a72..989cccaef 100644 --- a/weed/mq/broker/broker_grpc_assign.go +++ b/weed/mq/broker/broker_grpc_assign.go @@ -3,7 +3,7 @@ package broker import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/logstore" "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" "github.com/seaweedfs/seaweedfs/weed/mq/topic" @@ -50,7 +50,7 @@ func (b *MessageQueueBroker) AssignTopicPartitions(c context.Context, request *m } } - glog.V(0).Infof("AssignTopicPartitions: topic %s partition assignments: %v", request.Topic, request.BrokerPartitionAssignments) + log.V(3).Infof("AssignTopicPartitions: topic %s partition assignments: %v", request.Topic, request.BrokerPartitionAssignments) return ret, nil } @@ -91,7 +91,7 @@ func (b *MessageQueueBroker) assignTopicPartitionsToBrokers(ctx context.Context, brokerStats.RegisterAssignment(t, bpa.Partition, isAdd) return nil }); doCreateErr != nil { - glog.Errorf("create topic %s partition %+v on %s: %v", t, bpa.Partition, bpa.LeaderBroker, doCreateErr) + log.Errorf("create topic %s partition %+v on %s: %v", t, bpa.Partition, bpa.LeaderBroker, doCreateErr) } }(bpa) } diff --git a/weed/mq/broker/broker_grpc_configure.go b/weed/mq/broker/broker_grpc_configure.go index f827f0b37..ea781807f 100644 --- a/weed/mq/broker/broker_grpc_configure.go +++ b/weed/mq/broker/broker_grpc_configure.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb" @@ -36,7 +36,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb. 
var readErr, assignErr error resp, readErr = b.fca.ReadTopicConfFromFiler(t) if readErr != nil { - glog.V(0).Infof("read topic %s conf: %v", request.Topic, readErr) + log.V(3).Infof("read topic %s conf: %v", request.Topic, readErr) } if resp != nil { @@ -47,13 +47,13 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb. } if readErr == nil && assignErr == nil && len(resp.BrokerPartitionAssignments) == int(request.PartitionCount) { - glog.V(0).Infof("existing topic partitions %d: %+v", len(resp.BrokerPartitionAssignments), resp.BrokerPartitionAssignments) + log.V(3).Infof("existing topic partitions %d: %+v", len(resp.BrokerPartitionAssignments), resp.BrokerPartitionAssignments) return } if resp != nil && len(resp.BrokerPartitionAssignments) > 0 { if cancelErr := b.assignTopicPartitionsToBrokers(ctx, request.Topic, resp.BrokerPartitionAssignments, false); cancelErr != nil { - glog.V(1).Infof("cancel old topic %s partitions assignments %v : %v", request.Topic, resp.BrokerPartitionAssignments, cancelErr) + log.V(2).Infof("cancel old topic %s partitions assignments %v : %v", request.Topic, resp.BrokerPartitionAssignments, cancelErr) } } resp = &mq_pb.ConfigureTopicResponse{} @@ -70,7 +70,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb. b.PubBalancer.OnPartitionChange(request.Topic, resp.BrokerPartitionAssignments) - glog.V(0).Infof("ConfigureTopic: topic %s partition assignments: %v", request.Topic, resp.BrokerPartitionAssignments) + log.V(3).Infof("ConfigureTopic: topic %s partition assignments: %v", request.Topic, resp.BrokerPartitionAssignments) return resp, err } diff --git a/weed/mq/broker/broker_grpc_lookup.go b/weed/mq/broker/broker_grpc_lookup.go index 65a1ffda8..07cecccd6 100644 --- a/weed/mq/broker/broker_grpc_lookup.go +++ b/weed/mq/broker/broker_grpc_lookup.go @@ -3,7 +3,7 @@ package broker import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" @@ -28,7 +28,7 @@ func (b *MessageQueueBroker) LookupTopicBrokers(ctx context.Context, request *mq conf := &mq_pb.ConfigureTopicResponse{} ret.Topic = request.Topic if conf, err = b.fca.ReadTopicConfFromFiler(t); err != nil { - glog.V(0).Infof("lookup topic %s conf: %v", request.Topic, err) + log.V(3).Infof("lookup topic %s conf: %v", request.Topic, err) } else { err = b.ensureTopicActiveAssignments(t, conf) ret.BrokerPartitionAssignments = conf.BrokerPartitionAssignments diff --git a/weed/mq/broker/broker_grpc_pub.go b/weed/mq/broker/broker_grpc_pub.go index f31dc7eff..64841a1b9 100644 --- a/weed/mq/broker/broker_grpc_pub.go +++ b/weed/mq/broker/broker_grpc_pub.go @@ -3,7 +3,7 @@ package broker import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "google.golang.org/grpc/peer" @@ -46,7 +46,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis initMessage := req.GetInit() if initMessage == nil { response.Error = fmt.Sprintf("missing init message") - glog.Errorf("missing init message") + log.Errorf("missing init message") return stream.Send(response) } @@ -55,14 +55,14 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis localTopicPartition, getOrGenErr := 
b.GetOrGenerateLocalPartition(t, p) if getOrGenErr != nil { response.Error = fmt.Sprintf("topic %v not found: %v", t, getOrGenErr) - glog.Errorf("topic %v not found: %v", t, getOrGenErr) + log.Errorf("topic %v not found: %v", t, getOrGenErr) return stream.Send(response) } // connect to follower brokers if followerErr := localTopicPartition.MaybeConnectToFollowers(initMessage, b.grpcDialOption); followerErr != nil { response.Error = followerErr.Error() - glog.Errorf("MaybeConnectToFollowers: %v", followerErr) + log.Errorf("MaybeConnectToFollowers: %v", followerErr) return stream.Send(response) } @@ -88,7 +88,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis AckSequence: acknowledgedSequence, } if err := stream.Send(response); err != nil { - glog.Errorf("Error sending response %v: %v", response, err) + log.Errorf("Error sending response %v: %v", response, err) } // println("sent ack", acknowledgedSequence, "=>", initMessage.PublisherName) lastAckTime = time.Now() @@ -107,7 +107,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis localTopicPartition.Publishers.RemovePublisher(clientName) if localTopicPartition.MaybeShutdownLocalPartition() { b.localTopicManager.RemoveLocalPartition(t, p) - glog.V(0).Infof("Removed local topic %v partition %v", initMessage.Topic, initMessage.Partition) + log.V(3).Infof("Removed local topic %v partition %v", initMessage.Topic, initMessage.Partition) } }() @@ -126,7 +126,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis if err == io.EOF { break } - glog.V(0).Infof("topic %v partition %v publish stream from %s error: %v", initMessage.Topic, initMessage.Partition, initMessage.PublisherName, err) + log.V(3).Infof("topic %v partition %v publish stream from %s error: %v", initMessage.Topic, initMessage.Partition, initMessage.PublisherName, err) break } @@ -145,7 +145,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis } } - glog.V(0).Infof("topic %v partition %v publish stream from %s closed.", initMessage.Topic, initMessage.Partition, initMessage.PublisherName) + log.V(3).Infof("topic %v partition %v publish stream from %s closed.", initMessage.Topic, initMessage.Partition, initMessage.PublisherName) return nil } @@ -155,11 +155,11 @@ func findClientAddress(ctx context.Context) string { // fmt.Printf("FromContext %+v\n", ctx) pr, ok := peer.FromContext(ctx) if !ok { - glog.Error("failed to get peer from ctx") + log.Error("failed to get peer from ctx") return "" } if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") + log.Error("failed to get peer address") return "" } return pr.Addr.String() diff --git a/weed/mq/broker/broker_grpc_pub_balancer.go b/weed/mq/broker/broker_grpc_pub_balancer.go index 5978d2173..cc48ce604 100644 --- a/weed/mq/broker/broker_grpc_pub_balancer.go +++ b/weed/mq/broker/broker_grpc_pub_balancer.go @@ -41,7 +41,7 @@ func (b *MessageQueueBroker) PublisherToPubBalancer(stream mq_pb.SeaweedMessagin } if receivedStats := req.GetStats(); receivedStats != nil { b.PubBalancer.OnBrokerStatsUpdated(initMessage.Broker, brokerStats, receivedStats) - // glog.V(4).Infof("received from %v: %+v", initMessage.Broker, receivedStats) + // log.V(-1).Infof("received from %v: %+v", initMessage.Broker, receivedStats) } } diff --git a/weed/mq/broker/broker_grpc_pub_follow.go b/weed/mq/broker/broker_grpc_pub_follow.go index 291f1ef62..367cd12ef 100644 --- a/weed/mq/broker/broker_grpc_pub_follow.go +++ 
b/weed/mq/broker/broker_grpc_pub_follow.go @@ -2,7 +2,7 @@ package broker import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/util/buffered_queue" @@ -43,7 +43,7 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi err = nil break } - glog.V(0).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err) + log.V(3).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err) break } @@ -58,14 +58,14 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi if err := stream.Send(&mq_pb.PublishFollowMeResponse{ AckTsNs: dataMessage.TsNs, }); err != nil { - glog.Errorf("Error sending response %v: %v", dataMessage, err) + log.Errorf("Error sending response %v: %v", dataMessage, err) } // println("ack", string(dataMessage.Key), dataMessage.TsNs) } else if closeMessage := req.GetClose(); closeMessage != nil { - glog.V(0).Infof("topic %v partition %v publish stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) + log.V(3).Infof("topic %v partition %v publish stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) break } else if flushMessage := req.GetFlush(); flushMessage != nil { - glog.V(0).Infof("topic %v partition %v publish stream flushed: %v", initMessage.Topic, initMessage.Partition, flushMessage) + log.V(3).Infof("topic %v partition %v publish stream flushed: %v", initMessage.Topic, initMessage.Partition, flushMessage) lastFlushTsNs = flushMessage.TsNs @@ -80,7 +80,7 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi } } else { - glog.Errorf("unknown message: %v", req) + log.Errorf("unknown message: %v", req) } } @@ -104,7 +104,7 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi startTime, stopTime := mem.startTime.UTC(), mem.stopTime.UTC() if stopTime.UnixNano() <= lastFlushTsNs { - glog.V(0).Infof("dropping remaining data at %v %v", t, p) + log.V(3).Infof("dropping remaining data at %v %v", t, p) continue } @@ -114,17 +114,17 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi for { if err := b.appendToFile(targetFile, mem.buf); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) + log.V(3).Infof("metadata log write failed %s: %v", targetFile, err) time.Sleep(737 * time.Millisecond) } else { break } } - glog.V(0).Infof("flushed remaining data at %v to %s size %d", mem.stopTime.UnixNano(), targetFile, len(mem.buf)) + log.V(3).Infof("flushed remaining data at %v to %s size %d", mem.stopTime.UnixNano(), targetFile, len(mem.buf)) } - glog.V(0).Infof("shut down follower for %v %v", t, p) + log.V(3).Infof("shut down follower for %v %v", t, p) return err } @@ -140,7 +140,7 @@ func (b *MessageQueueBroker) buildFollowerLogBuffer(inMemoryBuffers *buffered_qu startTime: startTime, stopTime: stopTime, }) - glog.V(0).Infof("queue up %d~%d size %d", startTime.UnixNano(), stopTime.UnixNano(), len(buf)) + log.V(3).Infof("queue up %d~%d size %d", startTime.UnixNano(), stopTime.UnixNano(), len(buf)) }, nil, func() { }) return lb diff --git a/weed/mq/broker/broker_grpc_sub.go b/weed/mq/broker/broker_grpc_sub.go index 9cdbe8325..ba8efb5e4 100644 --- a/weed/mq/broker/broker_grpc_sub.go +++ 
b/weed/mq/broker/broker_grpc_sub.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb" @@ -23,7 +23,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs return err } if req.GetInit() == nil { - glog.Errorf("missing init message") + log.Errorf("missing init message") return fmt.Errorf("missing init message") } @@ -33,7 +33,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs t := topic.FromPbTopic(req.GetInit().Topic) partition := topic.FromPbPartition(req.GetInit().GetPartitionOffset().GetPartition()) - glog.V(0).Infof("Subscriber %s on %v %v connected", req.GetInit().ConsumerId, t, partition) + log.V(3).Infof("Subscriber %s on %v %v connected", req.GetInit().ConsumerId, t, partition) localTopicPartition, getOrGenErr := b.GetOrGenerateLocalPartition(t, partition) if getOrGenErr != nil { @@ -41,7 +41,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs } localTopicPartition.Subscribers.AddSubscriber(clientName, topic.NewLocalSubscriber()) - glog.V(0).Infof("Subscriber %s connected on %v %v", clientName, t, partition) + log.V(3).Infof("Subscriber %s connected on %v %v", clientName, t, partition) isConnected := true sleepIntervalCount := 0 @@ -49,7 +49,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs defer func() { isConnected = false localTopicPartition.Subscribers.RemoveSubscriber(clientName) - glog.V(0).Infof("Subscriber %s on %v %v disconnected, sent %d", clientName, t, partition, counter) + log.V(3).Infof("Subscriber %s on %v %v disconnected, sent %d", clientName, t, partition, counter) if localTopicPartition.MaybeShutdownLocalPartition() { b.localTopicManager.RemoveLocalPartition(t, partition) } @@ -60,7 +60,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs // connect to the follower var subscribeFollowMeStream mq_pb.SeaweedMessaging_SubscribeFollowMeClient - glog.V(0).Infof("follower broker: %v", req.GetInit().FollowerBroker) + log.V(3).Infof("follower broker: %v", req.GetInit().FollowerBroker) if req.GetInit().FollowerBroker != "" { follower := req.GetInit().FollowerBroker if followerGrpcConnection, err := pb.GrpcDial(ctx, follower, true, b.grpcDialOption); err != nil { @@ -90,7 +90,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs } } } - glog.V(0).Infof("follower %s connected", follower) + log.V(3).Infof("follower %s connected", follower) } go func() { @@ -107,7 +107,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs }}) break } - glog.V(0).Infof("topic %v partition %v subscriber %s lastOffset %d error: %v", t, partition, clientName, lastOffset, err) + log.V(3).Infof("topic %v partition %v subscriber %s lastOffset %d error: %v", t, partition, clientName, lastOffset, err) break } if ack.GetAck().Key == nil { @@ -125,7 +125,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs }, }, }); err != nil { - glog.Errorf("Error sending ack to follower: %v", err) + log.Errorf("Error sending ack to follower: %v", err) break } lastOffset = currentLastOffset @@ -133,9 +133,9 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs } } if lastOffset > 0 { - 
glog.V(0).Infof("saveConsumerGroupOffset %v %v %v %v", t, partition, req.GetInit().ConsumerGroup, lastOffset) + log.V(3).Infof("saveConsumerGroupOffset %v %v %v %v", t, partition, req.GetInit().ConsumerGroup, lastOffset) if err := b.saveConsumerGroupOffset(t, partition, req.GetInit().ConsumerGroup, lastOffset); err != nil { - glog.Errorf("saveConsumerGroupOffset partition %v lastOffset %d: %v", partition, lastOffset, err) + log.Errorf("saveConsumerGroupOffset partition %v lastOffset %d: %v", partition, lastOffset, err) } } if subscribeFollowMeStream != nil { @@ -145,7 +145,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs }, }); err != nil { if err != io.EOF { - glog.Errorf("Error sending close to follower: %v", err) + log.Errorf("Error sending close to follower: %v", err) } } } @@ -169,7 +169,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs // Client disconnected return false } - glog.V(0).Infof("Subscriber %s disconnected: %v", clientName, err) + log.V(3).Infof("Subscriber %s disconnected: %v", clientName, err) return false default: // Continue processing the request @@ -190,7 +190,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs // Client disconnected return false, nil } - glog.V(0).Infof("Subscriber %s disconnected: %v", clientName, err) + log.V(3).Infof("Subscriber %s disconnected: %v", clientName, err) return false, nil default: // Continue processing the request @@ -207,7 +207,7 @@ func (b *MessageQueueBroker) SubscribeMessage(stream mq_pb.SeaweedMessaging_Subs TsNs: logEntry.TsNs, }, }}); err != nil { - glog.Errorf("Error sending data: %v", err) + log.Errorf("Error sending data: %v", err) return false, err } @@ -241,7 +241,7 @@ func (b *MessageQueueBroker) getRequestPosition(initMessage *mq_pb.SubscribeMess // try to resume if storedOffset, err := b.readConsumerGroupOffset(initMessage); err == nil { - glog.V(0).Infof("resume from saved offset %v %v %v: %v", initMessage.Topic, initMessage.PartitionOffset.Partition, initMessage.ConsumerGroup, storedOffset) + log.V(3).Infof("resume from saved offset %v %v %v: %v", initMessage.Topic, initMessage.PartitionOffset.Partition, initMessage.ConsumerGroup, storedOffset) startPosition = log_buffer.NewMessagePosition(storedOffset, -2) return } diff --git a/weed/mq/broker/broker_grpc_sub_coordinator.go b/weed/mq/broker/broker_grpc_sub_coordinator.go index 985b0a47e..6dffa606d 100644 --- a/weed/mq/broker/broker_grpc_sub_coordinator.go +++ b/weed/mq/broker/broker_grpc_sub_coordinator.go @@ -3,7 +3,7 @@ package broker import ( "context" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "google.golang.org/grpc/codes" @@ -29,13 +29,13 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess if err != nil { return status.Errorf(codes.InvalidArgument, "failed to add subscriber: %v", err) } - glog.V(0).Infof("subscriber %s/%s/%s connected", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic) + log.V(3).Infof("subscriber %s/%s/%s connected", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic) } else { return status.Errorf(codes.InvalidArgument, "subscriber init message is empty") } defer func() { b.SubCoordinator.RemoveSubscriber(initMessage) - glog.V(0).Infof("subscriber %s/%s/%s disconnected: %v", 
initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) + log.V(3).Infof("subscriber %s/%s/%s disconnected: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) }() ctx := stream.Context() @@ -45,15 +45,15 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess for { req, err := stream.Recv() if err != nil { - glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) + log.V(3).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) } if ackUnAssignment := req.GetAckUnAssignment(); ackUnAssignment != nil { - glog.V(0).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment) + log.V(3).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment) cg.AckUnAssignment(cgi, ackUnAssignment) } if ackAssignment := req.GetAckAssignment(); ackAssignment != nil { - glog.V(0).Infof("subscriber %s/%s/%s ack assignment %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackAssignment) + log.V(3).Infof("subscriber %s/%s/%s ack assignment %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackAssignment) cg.AckAssignment(cgi, ackAssignment) } @@ -80,12 +80,12 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess // Client disconnected return err } - glog.V(0).Infof("subscriber %s/%s/%s disconnected: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) + log.V(3).Infof("subscriber %s/%s/%s disconnected: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) return err case message := <-cgi.ResponseChan: - glog.V(0).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, message) + log.V(3).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, message) if err := stream.Send(message); err != nil { - glog.V(0).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) + log.V(3).Infof("subscriber %s/%s/%s send: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) } } } diff --git a/weed/mq/broker/broker_grpc_sub_follow.go b/weed/mq/broker/broker_grpc_sub_follow.go index bed906c30..39344d4a6 100644 --- a/weed/mq/broker/broker_grpc_sub_follow.go +++ b/weed/mq/broker/broker_grpc_sub_follow.go @@ -3,7 +3,7 @@ package broker import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" @@ -34,7 +34,7 @@ func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_Sub err = nil break } - glog.V(0).Infof("topic %v partition %v subscribe stream error: %v", initMessage.Topic, initMessage.Partition, err) + log.V(3).Infof("topic %v partition %v subscribe stream error: %v", initMessage.Topic, initMessage.Partition, err) break } @@ -43,10 +43,10 
@@ func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_Sub lastOffset = ackMessage.TsNs // println("sub follower got offset", lastOffset) } else if closeMessage := req.GetClose(); closeMessage != nil { - glog.V(0).Infof("topic %v partition %v subscribe stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) + log.V(3).Infof("topic %v partition %v subscribe stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage) return nil } else { - glog.Errorf("unknown message: %v", req) + log.Errorf("unknown message: %v", req) } } @@ -56,7 +56,7 @@ func (b *MessageQueueBroker) SubscribeFollowMe(stream mq_pb.SeaweedMessaging_Sub err = b.saveConsumerGroupOffset(t, p, initMessage.ConsumerGroup, lastOffset) } - glog.V(0).Infof("shut down follower for %v offset %d", initMessage, lastOffset) + log.V(3).Infof("shut down follower for %v offset %d", initMessage, lastOffset) return err } @@ -90,7 +90,7 @@ func (b *MessageQueueBroker) saveConsumerGroupOffset(t topic.Topic, p topic.Part util.Uint64toBytes(offsetBytes, uint64(offset)) return b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { - glog.V(0).Infof("saving topic %s partition %v consumer group %s offset %d", t, p, consumerGroup, offset) + log.V(3).Infof("saving topic %s partition %v consumer group %s offset %d", t, p, consumerGroup, offset) return filer.SaveInsideFiler(client, partitionDir, offsetFileName, offsetBytes) }) } diff --git a/weed/mq/broker/broker_server.go b/weed/mq/broker/broker_server.go index d80fa91a4..ef1d6ec75 100644 --- a/weed/mq/broker/broker_server.go +++ b/weed/mq/broker/broker_server.go @@ -3,7 +3,7 @@ package broker import ( "context" "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" "github.com/seaweedfs/seaweedfs/weed/mq/sub_coordinator" "github.com/seaweedfs/seaweedfs/weed/mq/topic" @@ -89,12 +89,12 @@ func NewMessageBroker(option *MessageQueueBrokerOption, grpcDialOption grpc.Dial time.Sleep(time.Millisecond * 237) } self := option.BrokerAddress() - glog.V(0).Infof("broker %s found filer %s", self, mqBroker.currentFiler) + log.V(3).Infof("broker %s found filer %s", self, mqBroker.currentFiler) newBrokerBalancerCh := make(chan string, 1) lockClient := cluster.NewLockClient(grpcDialOption, mqBroker.currentFiler) mqBroker.lockAsBalancer = lockClient.StartLongLivedLock(pub_balancer.LockBrokerBalancer, string(self), func(newLockOwner string) { - glog.V(0).Infof("broker %s found balanacer %s", self, newLockOwner) + log.V(3).Infof("broker %s found balancer %s", self, newLockOwner) newBrokerBalancerCh <- newLockOwner }) mqBroker.KeepConnectedToBrokerBalancer(newBrokerBalancerCh) diff --git a/weed/mq/broker/broker_topic_conf_read_write.go b/weed/mq/broker/broker_topic_conf_read_write.go index 222ff16ba..cc1cbce19 100644 --- a/weed/mq/broker/broker_topic_conf_read_write.go +++ b/weed/mq/broker/broker_topic_conf_read_write.go @@ -2,7 +2,7 @@ package broker import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/logstore" "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" "github.com/seaweedfs/seaweedfs/weed/mq/topic" @@ -13,12 +13,12 @@ func (b *MessageQueueBroker) GetOrGenerateLocalPartition(t topic.Topic, partitio // get or generate a local partition conf, readConfErr := b.fca.ReadTopicConfFromFiler(t) if
readConfErr != nil { - glog.Errorf("topic %v not found: %v", t, readConfErr) + log.Errorf("topic %v not found: %v", t, readConfErr) return nil, fmt.Errorf("topic %v not found: %v", t, readConfErr) } localTopicPartition, _, getOrGenError = b.doGetOrGenLocalPartition(t, partition, conf) if getOrGenError != nil { - glog.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError) + log.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError) return nil, fmt.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError) } return localTopicPartition, nil @@ -55,7 +55,7 @@ func (b *MessageQueueBroker) ensureTopicActiveAssignments(t topic.Topic, conf *m // also fix assignee broker if invalid hasChanges := pub_balancer.EnsureAssignmentsToActiveBrokers(b.PubBalancer.Brokers, 1, conf.BrokerPartitionAssignments) if hasChanges { - glog.V(0).Infof("topic %v partition updated assignments: %v", t, conf.BrokerPartitionAssignments) + log.V(3).Infof("topic %v partition updated assignments: %v", t, conf.BrokerPartitionAssignments) if err = b.fca.SaveTopicConfToFiler(t, conf); err != nil { return err } diff --git a/weed/mq/broker/broker_topic_partition_read_write.go b/weed/mq/broker/broker_topic_partition_read_write.go index d6513b2a2..94ae45b73 100644 --- a/weed/mq/broker/broker_topic_partition_read_write.go +++ b/weed/mq/broker/broker_topic_partition_read_write.go @@ -2,7 +2,7 @@ package broker import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" "sync/atomic" @@ -25,7 +25,7 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, p topic.Partition) l for { if err := b.appendToFile(targetFile, buf); err != nil { - glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err) + log.V(3).Infof("metadata log write failed %s: %v", targetFile, err) time.Sleep(737 * time.Millisecond) } else { break @@ -40,6 +40,6 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, p topic.Partition) l localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs) } - glog.V(0).Infof("flushing at %d to %s size %d", logBuffer.LastFlushTsNs, targetFile, len(buf)) + log.V(3).Infof("flushing at %d to %s size %d", logBuffer.LastFlushTsNs, targetFile, len(buf)) } } diff --git a/weed/mq/client/pub_client/scheduler.go b/weed/mq/client/pub_client/scheduler.go index a768fa7f8..529efc693 100644 --- a/weed/mq/client/pub_client/scheduler.go +++ b/weed/mq/client/pub_client/scheduler.go @@ -3,19 +3,19 @@ package pub_client import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "sort" + "sync" + "sync/atomic" + "time" + "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/util/buffered_queue" + "github.com/seaweedfs/seaweedfs/weed/util/log" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" - "log" - "sort" - "sync" - "sync/atomic" - "time" ) type EachPartitionError struct { @@ -33,27 +33,26 @@ type EachPartitionPublishJob struct { } func (p *TopicPublisher) startSchedulerThread(wg *sync.WaitGroup) error { - if err := p.doConfigureTopic(); err != nil { wg.Done() return fmt.Errorf("configure topic %s: %v", p.config.Topic, err) } - log.Printf("start scheduler thread for topic %s", p.config.Topic) + log.Infof("start scheduler thread for topic %s", p.config.Topic) 
generation := 0 var errChan chan EachPartitionError for { - glog.V(0).Infof("lookup partitions gen %d topic %s", generation+1, p.config.Topic) + log.V(3).Infof("lookup partitions gen %d topic %s", generation+1, p.config.Topic) if assignments, err := p.doLookupTopicPartitions(); err == nil { generation++ - glog.V(0).Infof("start generation %d with %d assignments", generation, len(assignments)) + log.V(3).Infof("start generation %d with %d assignments", generation, len(assignments)) if errChan == nil { errChan = make(chan EachPartitionError, len(assignments)) } p.onEachAssignments(generation, assignments, errChan) } else { - glog.Errorf("lookup topic %s: %v", p.config.Topic, err) + log.Errorf("lookup topic %s: %v", p.config.Topic, err) time.Sleep(5 * time.Second) continue } @@ -66,7 +65,7 @@ func (p *TopicPublisher) startSchedulerThread(wg *sync.WaitGroup) error { for { select { case eachErr := <-errChan: - glog.Errorf("gen %d publish to topic %s partition %v: %v", eachErr.generation, p.config.Topic, eachErr.Partition, eachErr.Err) + log.Errorf("gen %d publish to topic %s partition %v: %v", eachErr.generation, p.config.Topic, eachErr.Partition, eachErr.Err) if eachErr.generation < generation { continue } @@ -114,7 +113,7 @@ func (p *TopicPublisher) onEachAssignments(generation int, assignments []*mq_pb. go func(job *EachPartitionPublishJob) { defer job.wg.Done() if err := p.doPublishToPartition(job); err != nil { - log.Printf("publish to %s partition %v: %v", p.config.Topic, job.Partition, err) + log.Infof("publish to %s partition %v: %v", p.config.Topic, job.Partition, err) errChan <- EachPartitionError{assignment, err, generation} } }(job) @@ -127,8 +126,7 @@ } func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) error { - - log.Printf("connecting to %v for topic partition %+v", job.LeaderBroker, job.Partition) + log.Infof("connecting to %v for topic partition %+v", job.LeaderBroker, job.Partition) grpcConnection, err := grpc.NewClient(job.LeaderBroker, grpc.WithTransportCredentials(insecure.NewCredentials()), p.grpcDialOption) if err != nil { @@ -159,10 +157,19 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro // process the hello message resp, err := stream.Recv() if err != nil { - return fmt.Errorf("recv init response: %v", err) + e, _ := status.FromError(err) + if e.Code() == codes.Unknown && e.Message() == "EOF" { + log.Infof("publish to %s EOF", publishClient.Broker) + return nil + } + publishClient.Err = err + log.Errorf("publish1 to %s error: %v", publishClient.Broker, err) + return err } if resp.Error != "" { + publishClient.Err = fmt.Errorf("init response error: %v", resp.Error) + log.Errorf("publish2 to %s error: %v", publishClient.Broker, resp.Error) - return fmt.Errorf("init response error: %v", resp.Error) + return fmt.Errorf("init response error: %v", resp.Error) } var publishedTsNs int64 @@ -176,20 +183,20 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro if err != nil { e, _ := status.FromError(err) if e.Code() == codes.Unknown && e.Message() == "EOF" { - log.Printf("publish to %s EOF", publishClient.Broker) + log.Infof("publish to %s EOF", publishClient.Broker) return } publishClient.Err = err - log.Printf("publish1 to %s error: %v\n", publishClient.Broker, err) + log.Errorf("publish1 to %s error: %v", publishClient.Broker, err) return } if ackResp.Error != "" { publishClient.Err = fmt.Errorf("ack error: %v",
ackResp.Error) - log.Printf("publish2 to %s error: %v\n", publishClient.Broker, ackResp.Error) + log.Errorf("publish2 to %s error: %v", publishClient.Broker, ackResp.Error) return } if ackResp.AckSequence > 0 { - log.Printf("ack %d published %d hasMoreData:%d", ackResp.AckSequence, atomic.LoadInt64(&publishedTsNs), atomic.LoadInt32(&hasMoreData)) + log.Infof("ack %d published %d hasMoreData:%d", ackResp.AckSequence, atomic.LoadInt64(&publishedTsNs), atomic.LoadInt32(&hasMoreData)) } if atomic.LoadInt64(&publishedTsNs) <= ackResp.AckSequence && atomic.LoadInt32(&hasMoreData) == 0 { return @@ -222,7 +229,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro } } - log.Printf("published %d messages to %v for topic partition %+v", publishCounter, job.LeaderBroker, job.Partition) + log.Infof("published %d messages to %v for topic partition %+v", publishCounter, job.LeaderBroker, job.Partition) return nil } @@ -272,7 +279,7 @@ func (p *TopicPublisher) doLookupTopicPartitions() (assignments []*mq_pb.BrokerP &mq_pb.LookupTopicBrokersRequest{ Topic: p.config.Topic.ToPbTopic(), }) - glog.V(0).Infof("lookup topic %s: %v", p.config.Topic, lookupResp) + log.V(3).Infof("lookup topic %s: %v", p.config.Topic, lookupResp) if err != nil { return err diff --git a/weed/mq/client/sub_client/connect_to_sub_coordinator.go b/weed/mq/client/sub_client/connect_to_sub_coordinator.go index e88aaca2f..feccca7a4 100644 --- a/weed/mq/client/sub_client/connect_to_sub_coordinator.go +++ b/weed/mq/client/sub_client/connect_to_sub_coordinator.go @@ -1,7 +1,7 @@ package sub_client import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "time" @@ -29,17 +29,17 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { return nil }) if err != nil { - glog.V(0).Infof("broker coordinator on %s: %v", broker, err) + log.V(3).Infof("broker coordinator on %s: %v", broker, err) continue } - glog.V(0).Infof("found broker coordinator: %v", brokerLeader) + log.V(3).Infof("found broker coordinator: %v", brokerLeader) // connect to the balancer pb.WithBrokerGrpcClient(true, brokerLeader, sub.SubscriberConfig.GrpcDialOption, func(client mq_pb.SeaweedMessagingClient) error { stream, err := client.SubscriberToSubCoordinator(sub.ctx) if err != nil { - glog.V(0).Infof("subscriber %s: %v", sub.ContentConfig.Topic, err) + log.V(3).Infof("subscriber %s: %v", sub.ContentConfig.Topic, err) return err } waitTime = 1 * time.Second @@ -56,7 +56,7 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { }, }, }); err != nil { - glog.V(0).Infof("subscriber %s send init: %v", sub.ContentConfig.Topic, err) + log.V(3).Infof("subscriber %s send init: %v", sub.ContentConfig.Topic, err) return err } @@ -69,9 +69,9 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { default: } - glog.V(0).Infof("subscriber instance %s ack %+v", sub.SubscriberConfig.ConsumerGroupInstanceId, reply) + log.V(3).Infof("subscriber instance %s ack %+v", sub.SubscriberConfig.ConsumerGroupInstanceId, reply) if err := stream.Send(reply); err != nil { - glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err) + log.V(3).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err) return } } @@ -81,7 +81,7 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { for { resp, err := stream.Recv() if err != nil { - glog.V(0).Infof("subscriber %s receive: %v", 
sub.ContentConfig.Topic, err) + log.V(3).Infof("subscriber %s receive: %v", sub.ContentConfig.Topic, err) return err } @@ -92,13 +92,13 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() { } sub.brokerPartitionAssignmentChan <- resp - glog.V(0).Infof("Received assignment: %+v", resp) + log.V(3).Infof("Received assignment: %+v", resp) } return nil }) } - glog.V(0).Infof("subscriber %s/%s waiting for more assignments", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) + log.V(3).Infof("subscriber %s/%s waiting for more assignments", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) if waitTime < 10*time.Second { waitTime += 1 * time.Second } diff --git a/weed/mq/client/sub_client/on_each_partition.go b/weed/mq/client/sub_client/on_each_partition.go index 14a38cfa8..a931eb71f 100644 --- a/weed/mq/client/sub_client/on_each_partition.go +++ b/weed/mq/client/sub_client/on_each_partition.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" @@ -52,10 +52,10 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig }, }, }); err != nil { - glog.V(0).Infof("subscriber %s connected to partition %+v at %v: %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker, err) + log.V(3).Infof("subscriber %s connected to partition %+v at %v: %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker, err) } - glog.V(0).Infof("subscriber %s connected to partition %+v at %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker) + log.V(3).Infof("subscriber %s connected to partition %+v at %v", sub.ContentConfig.Topic, assigned.Partition, assigned.LeaderBroker) if sub.OnCompletionFunc != nil { defer sub.OnCompletionFunc() @@ -88,7 +88,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig }() for { - // glog.V(0).Infof("subscriber %s/%s waiting for message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) + // log.V(3).Infof("subscriber %s/%s waiting for message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) resp, err := subscribeClient.Recv() if err != nil { if errors.Is(err, io.EOF) { @@ -97,7 +97,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig return fmt.Errorf("subscribe recv: %v", err) } if resp.Message == nil { - glog.V(0).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) + log.V(3).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup) continue } @@ -112,7 +112,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig switch m := resp.Message.(type) { case *mq_pb.SubscribeMessageResponse_Data: if m.Data.Ctrl != nil { - glog.V(2).Infof("subscriber %s received control from producer:%s isClose:%v", sub.SubscriberConfig.ConsumerGroup, m.Data.Ctrl.PublisherName, m.Data.Ctrl.IsClose) + log.V(1).Infof("subscriber %s received control from producer:%s isClose:%v", sub.SubscriberConfig.ConsumerGroup, m.Data.Ctrl.PublisherName, m.Data.Ctrl.IsClose) continue } if len(m.Data.Key) == 0 { @@ -121,7 +121,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig } onDataMessageFn(m) case *mq_pb.SubscribeMessageResponse_Ctrl: - // 
glog.V(0).Infof("subscriber %s/%s/%s received control %+v", sub.ContentConfig.Namespace, sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, m.Ctrl) + // log.V(3).Infof("subscriber %s/%s/%s received control %+v", sub.ContentConfig.Namespace, sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, m.Ctrl) if m.Ctrl.IsEndOfStream || m.Ctrl.IsEndOfTopic { return io.EOF } diff --git a/weed/mq/client/sub_client/subscribe.go b/weed/mq/client/sub_client/subscribe.go index d4dea3852..1d06e0601 100644 --- a/weed/mq/client/sub_client/subscribe.go +++ b/weed/mq/client/sub_client/subscribe.go @@ -1,7 +1,7 @@ package sub_client import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -60,7 +60,7 @@ func (sub *TopicSubscriber) startProcessors() { <-semaphore wg.Done() }() - glog.V(0).Infof("subscriber %s/%s assigned partition %+v at %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) + log.V(3).Infof("subscriber %s/%s assigned partition %+v at %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{ Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignment{ AckAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignmentMessage{ @@ -84,9 +84,9 @@ func (sub *TopicSubscriber) startProcessors() { err := sub.onEachPartition(assigned, stopChan, onDataMessageFn) if err != nil { - glog.V(0).Infof("subscriber %s/%s partition %+v at %v: %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker, err) + log.V(3).Infof("subscriber %s/%s partition %+v at %v: %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker, err) } else { - glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) + log.V(3).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) } sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{ Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{ @@ -130,7 +130,7 @@ func (sub *TopicSubscriber) waitUntilNoOverlappingPartitionInFlight(topicPartiti } sub.activeProcessorsLock.Unlock() if foundOverlapping { - glog.V(0).Infof("subscriber %s new partition %v waiting for partition %+v to complete", sub.ContentConfig.Topic, topicPartition, overlappedPartition) + log.V(3).Infof("subscriber %s new partition %v waiting for partition %+v to complete", sub.ContentConfig.Topic, topicPartition, overlappedPartition) time.Sleep(1 * time.Second) } } diff --git a/weed/mq/logstore/log_to_parquet.go b/weed/mq/logstore/log_to_parquet.go index 30cad8cc1..c16990c5e 100644 --- a/weed/mq/logstore/log_to_parquet.go +++ b/weed/mq/logstore/log_to_parquet.go @@ -160,7 +160,7 @@ func readAllLogFiles(filerClient filer_pb.FilerClient, partitionDir string, time } logTime, err := time.Parse(topic.TIME_FORMAT, entry.Name) if err != nil { - // glog.Warningf("parse log time %s: %v", entry.Name, err) + // log.Warningf("parse log time %s: %v", entry.Name, err) return nil } if maxTsNs > 0 && 
logTime.UnixNano() <= maxTsNs { diff --git a/weed/mq/logstore/merged_read.go b/weed/mq/logstore/merged_read.go index 03a47ace4..ea8868a70 100644 --- a/weed/mq/logstore/merged_read.go +++ b/weed/mq/logstore/merged_read.go @@ -17,9 +17,9 @@ func mergeReadFuncs(fromParquetFn, readLogDirectFn log_buffer.LogReadFromDiskFun var lastProcessedPosition log_buffer.MessagePosition return func(startPosition log_buffer.MessagePosition, stopTsNs int64, eachLogEntryFn log_buffer.EachLogEntryFuncType) (lastReadPosition log_buffer.MessagePosition, isDone bool, err error) { if !exhaustedParquet { - // glog.V(4).Infof("reading from parquet startPosition: %v\n", startPosition.UTC()) + // log.V(-1).Infof("reading from parquet startPosition: %v\n", startPosition.UTC()) lastReadPosition, isDone, err = fromParquetFn(startPosition, stopTsNs, eachLogEntryFn) - // glog.V(4).Infof("read from parquet: %v %v %v %v\n", startPosition, lastReadPosition, isDone, err) + // log.V(-1).Infof("read from parquet: %v %v %v %v\n", startPosition, lastReadPosition, isDone, err) if isDone { isDone = false } @@ -34,7 +34,7 @@ func mergeReadFuncs(fromParquetFn, readLogDirectFn log_buffer.LogReadFromDiskFun startPosition = lastProcessedPosition } - // glog.V(4).Infof("reading from direct log startPosition: %v\n", startPosition.UTC()) + // log.V(-1).Infof("reading from direct log startPosition: %v\n", startPosition.UTC()) lastReadPosition, isDone, err = readLogDirectFn(startPosition, stopTsNs, eachLogEntryFn) return } diff --git a/weed/mq/logstore/read_log_from_disk.go b/weed/mq/logstore/read_log_from_disk.go index 71ba58c1f..00e412b40 100644 --- a/weed/mq/logstore/read_log_from_disk.go +++ b/weed/mq/logstore/read_log_from_disk.go @@ -3,7 +3,7 @@ package logstore import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -71,7 +71,7 @@ func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p top continue } if chunk.IsChunkManifest { - glog.Warningf("this should not happen. unexpected chunk manifest in %s/%s", partitionDir, entry.Name) + log.Warningf("this should not happen. 
unexpected chunk manifest in %s/%s", partitionDir, entry.Name) return } urlStrings, err = lookupFileIdFn(chunk.FileId) diff --git a/weed/mq/pub_balancer/allocate.go b/weed/mq/pub_balancer/allocate.go index 46d423b30..94398d6c7 100644 --- a/weed/mq/pub_balancer/allocate.go +++ b/weed/mq/pub_balancer/allocate.go @@ -2,7 +2,7 @@ package pub_balancer import ( cmap "github.com/orcaman/concurrent-map/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" "math/rand" @@ -30,7 +30,7 @@ func AllocateTopicPartitions(brokers cmap.ConcurrentMap[string, *BrokerStats], p EnsureAssignmentsToActiveBrokers(brokers, 1, assignments) - glog.V(0).Infof("allocate topic partitions %d: %v", len(assignments), assignments) + log.V(3).Infof("allocate topic partitions %d: %v", len(assignments), assignments) return } @@ -78,7 +78,7 @@ func pickBrokersExcluded(brokers []string, count int, excludedLeadBroker string, // EnsureAssignmentsToActiveBrokers ensures the assignments are assigned to active brokers func EnsureAssignmentsToActiveBrokers(activeBrokers cmap.ConcurrentMap[string, *BrokerStats], followerCount int, assignments []*mq_pb.BrokerPartitionAssignment) (hasChanges bool) { - glog.V(0).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v", activeBrokers.Count(), followerCount, assignments) + log.V(3).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v", activeBrokers.Count(), followerCount, assignments) candidates := make([]string, 0, activeBrokers.Count()) for brokerStatsItem := range activeBrokers.IterBuffered() { @@ -122,6 +122,6 @@ func EnsureAssignmentsToActiveBrokers(activeBrokers cmap.ConcurrentMap[string, * } - glog.V(0).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v hasChanges: %v", activeBrokers.Count(), followerCount, assignments, hasChanges) + log.V(3).Infof("EnsureAssignmentsToActiveBrokers: activeBrokers: %v, followerCount: %d, assignments: %v hasChanges: %v", activeBrokers.Count(), followerCount, assignments, hasChanges) return } diff --git a/weed/mq/pub_balancer/partition_list_broker.go b/weed/mq/pub_balancer/partition_list_broker.go index 34bdfd286..168f7f83d 100644 --- a/weed/mq/pub_balancer/partition_list_broker.go +++ b/weed/mq/pub_balancer/partition_list_broker.go @@ -1,7 +1,7 @@ package pub_balancer import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" ) @@ -28,11 +28,11 @@ func (ps *PartitionSlotToBrokerList) AddBroker(partition *schema_pb.Partition, b for _, partitionSlot := range ps.PartitionSlots { if partitionSlot.RangeStart == partition.RangeStart && partitionSlot.RangeStop == partition.RangeStop { if partitionSlot.AssignedBroker != "" && partitionSlot.AssignedBroker != broker { - glog.V(0).Infof("partition %s broker change: %s => %s", partition, partitionSlot.AssignedBroker, broker) + log.V(3).Infof("partition %s broker change: %s => %s", partition, partitionSlot.AssignedBroker, broker) partitionSlot.AssignedBroker = broker } if partitionSlot.FollowerBroker != "" && partitionSlot.FollowerBroker != follower { - glog.V(0).Infof("partition %s follower change: %s => %s", partition, partitionSlot.FollowerBroker, follower) + log.V(3).Infof("partition %s follower change: %s => %s", partition, partitionSlot.FollowerBroker, 
follower) partitionSlot.FollowerBroker = follower } diff --git a/weed/mq/sub_coordinator/consumer_group.go b/weed/mq/sub_coordinator/consumer_group.go index ba94f34b4..afe400989 100644 --- a/weed/mq/sub_coordinator/consumer_group.go +++ b/weed/mq/sub_coordinator/consumer_group.go @@ -4,7 +4,7 @@ import ( "fmt" cmap "github.com/orcaman/concurrent-map/v2" "github.com/seaweedfs/seaweedfs/weed/filer_client" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/pb/schema_pb" @@ -35,7 +35,7 @@ func NewConsumerGroup(t *schema_pb.Topic, reblanceSeconds int32, filerClientAcce } cg.Market = NewMarket(partitions, time.Duration(reblanceSeconds)*time.Second) } else { - glog.V(0).Infof("fail to read topic conf from filer: %v", err) + log.V(3).Infof("failed to read topic conf from filer: %v", err) return nil } @@ -45,7 +45,7 @@ func NewConsumerGroup(t *schema_pb.Topic, reblanceSeconds int32, filerClientAcce case adjustment := <-cg.Market.AdjustmentChan: cgi, found := cg.ConsumerGroupInstances.Get(string(adjustment.consumer)) if !found { - glog.V(0).Infof("consumer group instance %s not found", adjustment.consumer) + log.V(3).Infof("consumer group instance %s not found", adjustment.consumer) continue } if adjustment.isAssign { @@ -63,7 +63,7 @@ func NewConsumerGroup(t *schema_pb.Topic, reblanceSeconds int32, filerClientAcce }, }, } - glog.V(0).Infof("send assignment %v to %s", adjustment.partition, adjustment.consumer) + log.V(3).Infof("send assignment %v to %s", adjustment.partition, adjustment.consumer) break } } @@ -76,7 +76,7 @@ func NewConsumerGroup(t *schema_pb.Topic, reblanceSeconds int32, filerClientAcce }, }, } - glog.V(0).Infof("send unassignment %v to %s", adjustment.partition, adjustment.consumer) + log.V(3).Infof("send unassignment %v to %s", adjustment.partition, adjustment.consumer) } case <-cg.stopCh: return diff --git a/weed/mq/sub_coordinator/market.go b/weed/mq/sub_coordinator/market.go index df07edfd5..7473248f7 100644 --- a/weed/mq/sub_coordinator/market.go +++ b/weed/mq/sub_coordinator/market.go @@ -2,7 +2,7 @@ package sub_coordinator import ( "errors" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/mq/topic" "sync" "time" @@ -290,7 +290,7 @@ func (m *Market) ConfirmAdjustment(adjustment *Adjustment) { } else { m.unassignPartitionSlot(adjustment.partition) } - glog.V(1).Infof("ConfirmAdjustment %+v", adjustment) + log.V(2).Infof("ConfirmAdjustment %+v", adjustment) m.Status() } @@ -300,12 +300,12 @@ func (m *Market) unassignPartitionSlot(partition topic.Partition) { partitionSlot, exists := m.partitions[partition] if !exists { - glog.V(0).Infof("partition %+v slot is not tracked", partition) + log.V(3).Infof("partition %+v slot is not tracked", partition) return } if partitionSlot.AssignedTo == nil { - glog.V(0).Infof("partition %+v slot is not assigned to any consumer", partition) + log.V(3).Infof("partition %+v slot is not assigned to any consumer", partition) return } @@ -319,7 +319,7 @@ func (m *Market) unassignPartitionSlot(partition topic.Partition) { } } - glog.V(0).Infof("partition %+v slot not found in assigned consumer", partition) + log.V(3).Infof("partition %+v slot not found in assigned consumer", partition) } @@ -329,18 +329,18 @@ func (m *Market) confirmAssignPartition(partition topic.Partition, consumerInsta
partitionSlot, exists := m.partitions[partition] if !exists { - glog.V(0).Infof("partition %+v slot is not tracked", partition) + log.V(3).Infof("partition %+v slot is not tracked", partition) return } if partitionSlot.AssignedTo != nil { - glog.V(0).Infof("partition %+v slot is already assigned to %+v", partition, partitionSlot.AssignedTo.InstanceId) + log.V(3).Infof("partition %+v slot is already assigned to %+v", partition, partitionSlot.AssignedTo.InstanceId) return } consumerInstance, exists := m.consumerInstances[consumerInstanceId] if !exists { - glog.V(0).Infof("consumer %+v is not tracked", consumerInstanceId) + log.V(3).Infof("consumer %+v is not tracked", consumerInstanceId) return } @@ -353,15 +353,15 @@ func (m *Market) Status() { m.mu.Lock() defer m.mu.Unlock() - glog.V(1).Infof("Market has %d partitions and %d consumer instances", len(m.partitions), len(m.consumerInstances)) + log.V(2).Infof("Market has %d partitions and %d consumer instances", len(m.partitions), len(m.consumerInstances)) for partition, slot := range m.partitions { if slot.AssignedTo == nil { - glog.V(1).Infof("Partition %+v is not assigned to any consumer", partition) + log.V(2).Infof("Partition %+v is not assigned to any consumer", partition) } else { - glog.V(1).Infof("Partition %+v is assigned to consumer %+v", partition, slot.AssignedTo.InstanceId) + log.V(2).Infof("Partition %+v is assigned to consumer %+v", partition, slot.AssignedTo.InstanceId) } } for _, consumer := range m.consumerInstances { - glog.V(1).Infof("Consumer %+v has %d partitions", consumer.InstanceId, len(consumer.AssignedPartitions)) + log.V(2).Infof("Consumer %+v has %d partitions", consumer.InstanceId, len(consumer.AssignedPartitions)) } } diff --git a/weed/mq/topic/local_partition.go b/weed/mq/topic/local_partition.go index d1433775a..158551747 100644 --- a/weed/mq/topic/local_partition.go +++ b/weed/mq/topic/local_partition.go @@ -3,7 +3,7 @@ package topic import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/mq_pb" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -82,7 +82,7 @@ func (p *LocalPartition) Subscribe(clientName string, startPosition log_buffer.M for { processedPosition, isDone, readPersistedLogErr = p.LogBuffer.ReadFromDiskFn(startPosition, 0, eachMessageFn) if readPersistedLogErr != nil { - glog.V(0).Infof("%s read %v persisted log: %v", clientName, p.Partition, readPersistedLogErr) + log.V(3).Infof("%s read %v persisted log: %v", clientName, p.Partition, readPersistedLogErr) return readPersistedLogErr } if isDone { @@ -104,7 +104,7 @@ func (p *LocalPartition) Subscribe(clientName string, startPosition log_buffer.M continue } if readInMemoryLogErr != nil { - glog.V(0).Infof("%s read %v in memory log: %v", clientName, p.Partition, readInMemoryLogErr) + log.V(3).Infof("%s read %v in memory log: %v", clientName, p.Partition, readInMemoryLogErr) return readInMemoryLogErr } } @@ -179,10 +179,10 @@ func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessa if err != nil { e, _ := status.FromError(err) if e.Code() == codes.Canceled { - glog.V(0).Infof("local partition %v follower %v stopped", p.Partition, p.Follower) + log.V(3).Infof("local partition %v follower %v stopped", p.Partition, p.Follower) return } - glog.Errorf("Receiving local partition %v follower %s ack: %v", p.Partition, p.Follower, err) + log.Errorf("Receiving local partition %v 
follower %s ack: %v", p.Partition, p.Follower, err) return } atomic.StoreInt64(&p.AckTsNs, ack.AckTsNs) @@ -206,9 +206,9 @@ func (p *LocalPartition) MaybeShutdownLocalPartition() (hasShutdown bool) { Close: &mq_pb.PublishFollowMeRequest_CloseMessage{}, }, }); followErr != nil { - glog.Errorf("Error closing follower stream: %v", followErr) + log.Errorf("Error closing follower stream: %v", followErr) } - glog.V(4).Infof("closing grpcConnection to follower") + log.V(-1).Infof("closing grpcConnection to follower") p.followerGrpcConnection.Close() p.publishFolloweMeStream = nil p.Follower = "" @@ -217,7 +217,7 @@ func (p *LocalPartition) MaybeShutdownLocalPartition() (hasShutdown bool) { hasShutdown = true } - glog.V(0).Infof("local partition %v Publisher:%d Subscriber:%d follower:%s shutdown %v", p.Partition, p.Publishers.Size(), p.Subscribers.Size(), p.Follower, hasShutdown) + log.V(3).Infof("local partition %v Publisher:%d Subscriber:%d follower:%s shutdown %v", p.Partition, p.Publishers.Size(), p.Subscribers.Size(), p.Follower, hasShutdown) return } @@ -225,7 +225,7 @@ func (p *LocalPartition) Shutdown() { p.closePublishers() p.closeSubscribers() p.LogBuffer.ShutdownLogBuffer() - glog.V(0).Infof("local partition %v shutting down", p.Partition) + log.V(3).Infof("local partition %v shutting down", p.Partition) } func (p *LocalPartition) NotifyLogFlushed(flushTsNs int64) { @@ -237,7 +237,7 @@ func (p *LocalPartition) NotifyLogFlushed(flushTsNs int64) { }, }, }); followErr != nil { - glog.Errorf("send follower %s flush message: %v", p.Follower, followErr) + log.Errorf("send follower %s flush message: %v", p.Follower, followErr) } // println("notifying", p.Follower, "flushed at", flushTsNs) } diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index c9e674257..45e21a6ec 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" @@ -28,8 +28,8 @@ func (k *AwsSqsPub) GetName() string { } func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) + log.V(3).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + log.V(3).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( configuration.GetString(prefix+"aws_access_key_id"), configuration.GetString(prefix+"aws_secret_access_key"), diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 1c620f2e6..7248c3259 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -1,7 +1,7 @@ package notification import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" ) @@ -31,11 +31,11 @@ func LoadConfiguration(config *util.ViperProxy, prefix string) { for _, queue := 
range MessageQueues { if config.GetBool(prefix + queue.GetName() + ".enabled") { if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { - glog.Fatalf("Failed to initialize notification for %s: %+v", + log.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } Queue = queue - glog.V(0).Infof("Configure notification message queue for %s", queue.GetName()) + log.V(3).Infof("Configure notification message queue for %s", queue.GetName()) return } } @@ -49,7 +49,7 @@ func validateOneEnabledQueue(config *util.ViperProxy) { if enabledQueue == "" { enabledQueue = queue.GetName() } else { - glog.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName()) + log.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName()) } } } diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index 131345f9c..1d1955234 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -30,7 +30,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/util" // _ "gocloud.dev/pubsub/azuresb" @@ -78,13 +78,13 @@ func (k *GoCDKPubSub) doReconnect() { k.topic.Shutdown(context.Background()) k.topicLock.RUnlock() for { - glog.Info("Try reconnect") + log.Info("Try reconnect") conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) if err == nil { k.setTopic(rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil)) break } - glog.Error(err) + log.Error(err) time.Sleep(time.Second) } }(conn) @@ -93,10 +93,10 @@ func (k *GoCDKPubSub) doReconnect() { func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { k.topicURL = configuration.GetString(prefix + "topic_url") - glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) + log.V(3).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { - glog.Fatalf("Failed to open topic: %v", err) + log.Fatalf("Failed to open topic: %v", err) } k.setTopic(topic) return nil diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index f5593fa48..2da5c1e51 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -6,7 +6,7 @@ import ( "os" "cloud.google.com/go/pubsub" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/api/option" @@ -26,8 +26,8 @@ func (k *GooglePubSub) GetName() string { } func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) + log.V(3).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + log.V(3).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( configuration.GetString(prefix+"google_application_credentials"), 
configuration.GetString(prefix+"project_id"), @@ -43,13 +43,13 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top var found bool google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS") if !found { - glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml") + log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml") } } client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials)) if err != nil { - glog.Fatalf("Failed to create client: %v", err) + log.Fatalf("Failed to create client: %v", err) } k.topic = client.Topic(topicName) @@ -57,11 +57,11 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top if !exists { k.topic, err = client.CreateTopic(ctx, topicName) if err != nil { - glog.Fatalf("Failed to create topic %s: %v", topicName, err) + log.Fatalf("Failed to create topic %s: %v", topicName, err) } } } else { - glog.Fatalf("Failed to check topic %s: %v", topicName, err) + log.Fatalf("Failed to check topic %s: %v", topicName, err) } return nil diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index 64cb4eaa9..bcb8f30f8 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -2,7 +2,7 @@ package kafka import ( "github.com/Shopify/sarama" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" @@ -22,8 +22,8 @@ func (k *KafkaQueue) GetName() string { } func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) + log.V(3).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + log.V(3).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( configuration.GetStringSlice(prefix+"hosts"), configuration.GetString(prefix+"topic"), @@ -67,7 +67,7 @@ func (k *KafkaQueue) handleSuccess() { for { pm := <-k.producer.Successes() if pm != nil { - glog.V(3).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key) + log.V(0).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key) } } } @@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() { for { err := <-k.producer.Errors() if err != nil { - glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) + log.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) } } } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index cc3557fee..51fb51c0b 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -1,7 +1,7 @@ package kafka import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" 
"github.com/seaweedfs/seaweedfs/weed/notification" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" @@ -24,6 +24,6 @@ func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) ( func (k *LogQueue) SendMessage(key string, message proto.Message) (err error) { - glog.V(0).Infof("%v: %+v", key, message) + log.V(3).Infof("%v: %+v", key, message) return nil } diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index be3e5c98e..be64f3108 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -12,7 +12,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/util" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" @@ -60,7 +60,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) if isCompressed { var err error if buffer, err = util.DecompressData(buffer); err != nil { - glog.V(0).Infof("fail to decompress chunk manifest: %v", err) + log.V(3).Infof("fail to decompress chunk manifest: %v", err) } } cm := ChunkManifest{} @@ -82,12 +82,12 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g } results, err := DeleteFileIds(masterFn, usePublicUrl, grpcDialOption, fileIds) if err != nil { - glog.V(0).Infof("delete %+v: %v", fileIds, err) + log.V(3).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) } for _, result := range results { if result.Error != "" { - glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error) + log.V(3).Infof("delete file %+v: %v", result.FileId, result.Error) return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error) } } diff --git a/weed/operation/lookup_vid_cache.go b/weed/operation/lookup_vid_cache.go index 248fc17de..79e8738e8 100644 --- a/weed/operation/lookup_vid_cache.go +++ b/weed/operation/lookup_vid_cache.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var ErrorNotFound = errors.New("not found") @@ -23,7 +23,7 @@ type VidCache struct { func (vc *VidCache) Get(vid string) ([]Location, error) { id, err := strconv.Atoi(vid) if err != nil { - glog.V(1).Infof("Unknown volume id %s", vid) + log.V(2).Infof("Unknown volume id %s", vid) return nil, err } vc.RLock() @@ -42,7 +42,7 @@ func (vc *VidCache) Get(vid string) ([]Location, error) { func (vc *VidCache) Set(vid string, locations []Location, duration time.Duration) { id, err := strconv.Atoi(vid) if err != nil { - glog.V(1).Infof("Unknown volume id %s", vid) + log.V(2).Infof("Unknown volume id %s", vid) return } vc.Lock() diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 9470afced..c9420d81f 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -14,7 +14,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/security" ) @@ -102,14 +102,14 @@ func newFilePart(fullPathFilename string) (ret *FilePart, err error) { ret = &FilePart{} fh, openErr := os.Open(fullPathFilename) if openErr != nil { - glog.V(0).Info("Failed to open file: ", fullPathFilename) + log.V(3).Info("Failed to open file: ", fullPathFilename) return ret, openErr } ret.Reader = fh fi, fiErr := fh.Stat() if fiErr != nil { - glog.V(0).Info("Failed to 
stat file:", fullPathFilename) + log.V(3).Info("Failed to stat file:", fullPathFilename) return ret, fiErr } ret.ModTime = fi.ModTime().UTC().Unix() @@ -251,7 +251,7 @@ func genFileUrl(ret *AssignResult, id string, usePublicUrl bool) string { func uploadOneChunk(filename string, reader io.Reader, masterFn GetMasterFn, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { - glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") + log.V(-1).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadOption := &UploadOption{ UploadUrl: fileUrl, Filename: filename, @@ -279,7 +279,7 @@ func uploadChunkedFileManifest(fileUrl string, manifest *ChunkManifest, jwt secu if e != nil { return e } - glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") + log.V(-1).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 0cf6bf7cf..e583d97a7 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -16,7 +16,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -114,7 +114,7 @@ func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assi if grpcAssignErr := filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { resp, assignErr := client.AssignVolume(context.Background(), assignRequest) if assignErr != nil { - glog.V(0).Infof("assign volume failure %v: %v", assignRequest, assignErr) + log.V(3).Infof("assign volume failure %v: %v", assignRequest, assignErr) return assignErr } if resp.Error != "" { @@ -139,7 +139,7 @@ func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assi } if uploadOption.RetryForever { util.RetryUntil("uploadWithRetryForever", doUploadFunc, func(err error) (shouldContinue bool) { - glog.V(0).Infof("upload content: %v", err) + log.V(3).Infof("upload content: %v", err) return true }) } else { @@ -187,7 +187,7 @@ func (uploader *Uploader) retriedUploadData(data []byte, option *UploadOption) ( uploadResult.RetryCount = i return } - glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err) + log.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err) } return } @@ -329,16 +329,16 @@ func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) er file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { - glog.V(0).Infoln("error creating form file", cp_err.Error()) + log.V(3).Infoln("error creating form file", cp_err.Error()) return nil, cp_err } if err := fillBufferFunction(file_writer); err != nil { - glog.V(0).Infoln("error copying data", err) + log.V(3).Infoln("error copying data", err) return nil, err } content_type := body_writer.FormDataContentType() if err := body_writer.Close(); err != nil { - glog.V(0).Infoln("error closing body", err) + log.V(3).Infoln("error closing body", err) return nil, err } if option.BytesBuffer == nil { @@ -348,7 +348,7 @@ func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) er } req, postErr := http.NewRequest(http.MethodPost, option.UploadUrl, reqReader) if postErr != nil { - glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr) + 
log.V(2).Infof("create upload request %s: %v", option.UploadUrl, postErr) return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr) } req.Header.Set("Content-Type", content_type) @@ -364,7 +364,7 @@ func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) er if post_err != nil { if strings.Contains(post_err.Error(), "connection reset by peer") || strings.Contains(post_err.Error(), "use of closed network connection") { - glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr) + log.V(2).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr) stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc() resp, post_err = uploader.httpClient.Do(req) defer util_http.CloseResponse(resp) @@ -389,7 +389,7 @@ func (uploader *Uploader) upload_content(fillBufferFunction func(w io.Writer) er unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body)) + log.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body)) return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err) } if ret.Error != "" { diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go index 35cea71b3..c58ce1be4 100644 --- a/weed/pb/filer_pb/filer_client.go +++ b/weed/pb/filer_pb/filer_client.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -36,15 +36,15 @@ func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry Name: name, } - // glog.V(3).Infof("read %s request: %v", fullFilePath, request) + // log.V(0).Infof("read %s request: %v", fullFilePath, request) resp, err := LookupEntry(client, request) if err != nil { - glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) + log.V(0).Infof("read %s %v: %v", fullFilePath, resp, err) return err } if resp.Entry == nil { - // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + // log.V(0).Infof("read %s entry: %v", fullFilePath, entry) return nil } @@ -117,7 +117,7 @@ func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix InclusiveStartFrom: inclusive, } - glog.V(4).Infof("read directory: %v", request) + log.V(-1).Infof("read directory: %v", request) ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := client.ListEntries(ctx, request) @@ -165,14 +165,14 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin Name: entryName, } - glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + log.V(-1).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) resp, err := LookupEntry(client, request) if err != nil { if err == ErrNotFound { exists = false return nil } - glog.V(0).Infof("exists entry %v: %v", request, err) + log.V(3).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -193,9 +193,9 @@ func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string Entry: entry, } - glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) + log.V(-1).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) if err := UpdateEntry(client, request); err != nil { - glog.V(0).Infof("touch exists entry %v: %v", request, err) + log.V(3).Infof("touch 
exists entry %v: %v", request, err) return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -232,9 +232,9 @@ func DoMkdir(client SeaweedFilerClient, parentDirectoryPath string, dirName stri Entry: entry, } - glog.V(1).Infof("mkdir: %v", request) + log.V(2).Infof("mkdir: %v", request) if err := CreateEntry(client, request); err != nil { - glog.V(0).Infof("mkdir %v: %v", request, err) + log.V(3).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -266,9 +266,9 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string Entry: entry, } - glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + log.V(2).Infof("create file: %s/%s", parentDirectoryPath, fileName) if err := CreateEntry(client, request); err != nil { - glog.V(0).Infof("create file %v:%v", request, err) + log.V(3).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go index e43443706..641294cd3 100644 --- a/weed/pb/filer_pb/filer_client_bfs.go +++ b/weed/pb/filer_pb/filer_client_bfs.go @@ -3,7 +3,7 @@ package filer_pb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "io" "sync" "time" @@ -70,7 +70,7 @@ func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queu } func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64, fn func(parentPath util.FullPath, entry *Entry) error) (err error) { - glog.V(0).Infof("TraverseBfsMetadata %v if before %v", dir, time.Unix(0, olderThanTsNs)) + log.V(3).Infof("TraverseBfsMetadata %v if before %v", dir, time.Unix(0, olderThanTsNs)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := client.TraverseBfsMetadata(ctx, &TraverseBfsMetadataRequest{ diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 0ec31420c..967166fef 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/viant/ptrie" "google.golang.org/protobuf/proto" @@ -111,11 +111,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) { func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { resp, err := client.CreateEntry(context.Background(), request) if err != nil { - glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + log.V(2).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) return fmt.Errorf("CreateEntry: %v", err) } if resp.Error != "" { - glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) + log.V(2).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) return fmt.Errorf("CreateEntry : %v", resp.Error) } return nil @@ -124,7 +124,7 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error { _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(1).Infof("update entry %s/%s 
:%v", request.Directory, request.Entry.Name, err) + log.V(2).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err) return fmt.Errorf("UpdateEntry: %v", err) } return nil @@ -136,7 +136,7 @@ func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { return nil, ErrNotFound } - glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + log.V(0).Infof("read %s/%v: %v", request.Directory, request.Name, err) return nil, fmt.Errorf("LookupEntry1: %v", err) } if resp.Entry == nil { diff --git a/weed/pb/filer_pb_tail.go b/weed/pb/filer_pb_tail.go index b7cca7585..c29d36057 100644 --- a/weed/pb/filer_pb_tail.go +++ b/weed/pb/filer_pb_tail.go @@ -3,7 +3,7 @@ package pb import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/grpc" @@ -87,20 +87,20 @@ func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn Proc if err := processEventFn(resp); err != nil { switch option.EventErrorType { case TrivialOnError: - glog.Errorf("process %v: %v", resp, err) + log.Errorf("process %v: %v", resp, err) case FatalOnError: - glog.Fatalf("process %v: %v", resp, err) + log.Fatalf("process %v: %v", resp, err) case RetryForeverOnError: util.RetryUntil("followMetaUpdates", func() error { return processEventFn(resp) }, func(err error) bool { - glog.Errorf("process %v: %v", resp, err) + log.Errorf("process %v: %v", resp, err) return true }) case DontLogError: // pass default: - glog.Errorf("process %v: %v", resp, err) + log.Errorf("process %v: %v", resp, err) } } option.StartTsNs = resp.TsNs diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go index 777dfb402..ebeee328a 100644 --- a/weed/pb/grpc_client_server.go +++ b/weed/pb/grpc_client_server.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/util" @@ -190,7 +190,7 @@ func ServerToGrpcAddress(server string) (serverGrpcAddress string) { host, port, parseErr := hostAndPort(server) if parseErr != nil { - glog.Fatalf("server address %s parse error: %v", server, parseErr) + log.Fatalf("server address %s parse error: %v", server, parseErr) } grpcPort := int(port) + 10000 @@ -201,7 +201,7 @@ func ServerToGrpcAddress(server string) (serverGrpcAddress string) { func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) { host, grpcPort, parseErr := hostAndPort(grpcAddress) if parseErr != nil { - glog.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr) + log.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr) } port := int(grpcPort) - 10000 diff --git a/weed/pb/server_discovery.go b/weed/pb/server_discovery.go index 25c0360c5..f0fe2221a 100644 --- a/weed/pb/server_discovery.go +++ b/weed/pb/server_discovery.go @@ -1,7 +1,7 @@ package pb import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "reflect" ) @@ -28,10 +28,10 @@ func (sd *ServerDiscovery) RefreshBySrvIfAvailable() { } newList, err := sd.srvRecord.LookUp() if err != nil { - glog.V(0).Infof("failed to lookup SRV for %s: %v", *sd.srvRecord, err) + log.V(3).Infof("failed to lookup SRV for %s: %v", 
*sd.srvRecord, err) } if newList == nil || len(newList) == 0 { - glog.V(0).Infof("looked up SRV for %s, but found no well-formed names", *sd.srvRecord) + log.V(3).Infof("looked up SRV for %s, but found no well-formed names", *sd.srvRecord) return } if !reflect.DeepEqual(sd.list, newList) { diff --git a/weed/remote_storage/gcs/gcs_storage_client.go b/weed/remote_storage/gcs/gcs_storage_client.go index b048effd9..90c4f198b 100644 --- a/weed/remote_storage/gcs/gcs_storage_client.go +++ b/weed/remote_storage/gcs/gcs_storage_client.go @@ -9,7 +9,7 @@ import ( "strings" "cloud.google.com/go/storage" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb" "github.com/seaweedfs/seaweedfs/weed/remote_storage" @@ -48,7 +48,7 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage. found := false projectID, found = os.LookupEnv("GOOGLE_CLOUD_PROJECT") if !found { - glog.Warningf("need to specific GOOGLE_CLOUD_PROJECT env variable") + log.Warningf("need to specify GOOGLE_CLOUD_PROJECT env variable") } } diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go index 4a77fd04a..c3d970f69 100644 --- a/weed/replication/repl_util/replication_util.go +++ b/weed/replication/repl_util/replication_util.go @@ -2,7 +2,7 @@ package repl_util import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/replication/source" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" ) @@ -25,9 +25,9 @@ func CopyFromChunkViews(chunkViews *filer.IntervalList[*filer.ChunkView], filerS writeErr = writeFunc(data) }) if err != nil { - glog.V(1).Infof("read from %s: %v", fileUrl, err) + log.V(2).Infof("read from %s: %v", fileUrl, err) } else if writeErr != nil { - glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr) + log.V(2).Infof("copy from %s: %v", fileUrl, writeErr) } else { break } diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 57aa63e5f..b8595098f 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/sink" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -40,12 +40,12 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p return nil } if !strings.HasPrefix(key, r.source.Dir) { - glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) + log.V(-1).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } for _, excludeDir := range r.excludeDirs { if strings.HasPrefix(key, excludeDir) { - glog.V(4).Infof("skipping %v of exclude dir %v", key, excludeDir) + log.V(-1).Infof("skipping %v of exclude dir %v", key, excludeDir) return nil } } @@ -61,24 +61,24 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p dateKey = time.Unix(mTime, 0).Format("2006-01-02") } newKey := util.Join(r.sink.GetSinkToDirectory(), dateKey, key[len(r.source.Dir):]) - glog.V(3).Infof("replicate %s => %s", key, newKey) + log.V(0).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil
{ - glog.V(4).Infof("deleting %v", key) + log.V(-1).Infof("deleting %v", key) return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) } if message.OldEntry == nil && message.NewEntry != nil { - glog.V(4).Infof("creating %v", key) + log.V(-1).Infof("creating %v", key) return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) } if message.OldEntry == nil && message.NewEntry == nil { - glog.V(0).Infof("weird message %+v", message) + log.V(3).Infof("weird message %+v", message) return nil } foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) if foundExisting { - glog.V(4).Infof("updated %v", key) + log.V(-1).Infof("updated %v", key) return err } @@ -87,7 +87,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p return fmt.Errorf("delete old entry %v: %v", key, err) } - glog.V(4).Infof("creating missing %v", key) + log.V(-1).Infof("creating missing %v", key) return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index fb2f9ff82..26908804d 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -12,7 +12,7 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/sink" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -64,7 +64,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e // Use your Storage account's name and key to create a credential object. credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) if err != nil { - glog.Fatalf("failed to create Azure credential with account name:%s: %v", accountName, err) + log.Fatalf("failed to create Azure credential with account name:%s: %v", accountName, err) } // Create a request pipeline that is used to process HTTP(S) requests and responses. 
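Every call-site rewrite in this change follows the same two-part pattern: swap the glog import for util/log, then invert the verbosity level. A before/after sketch of the pattern (illustrative only; the two calls are simplified from the fetch_write and s3 sink hunks later in this diff, and the variable names are placeholders):

    // glog convention: V(0) = most important, V(4) = most verbose
    glog.V(0).Infof("sink.s3.bucket: %v", bucket)
    glog.V(4).Infof("replicating %s", key)

    // util/log convention after this change: V(3) = most important, V(-1) = most verbose
    log.V(3).Infof("sink.s3.bucket: %v", bucket)
    log.V(-1).Infof("replicating %s", key)
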
@@ -118,7 +118,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [] res, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, accessCondition, azblob.BlobTagsMap{}, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) if res != nil && res.StatusCode() == http.StatusPreconditionFailed { - glog.V(0).Infof("skip overwriting %s/%s: %v", g.container, key, err) + log.V(3).Infof("skip overwriting %s/%s: %v", g.container, key, err) return nil } if err != nil { diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 4bcbc7898..a9a422c9b 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" @@ -93,7 +93,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) uploader, err := operation.NewUploader() if err != nil { - glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) + log.V(3).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) return "", fmt.Errorf("upload data: %v", err) } @@ -120,18 +120,18 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) if fs.writeChunkByFiler { fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId) } - glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) + log.V(-1).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) return fileUrl }, resp.Body, ) if err != nil { - glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) + log.V(3).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err) return "", fmt.Errorf("upload data: %v", err) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v: %v", filename, err) + log.V(3).Infof("upload failure %v: %v", filename, err) return "", fmt.Errorf("upload result: %v", uploadResult.Error) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 49f6877a0..6e5d52327 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -12,7 +12,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/sink" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -94,10 +94,10 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo dir, name := util.FullPath(key).DirAndName() - glog.V(4).Infof("delete entry: %v", key) + log.V(-1).Infof("delete entry: %v", key) err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures) if err != nil { - glog.V(0).Infof("delete entry %s: %v", key, err) + log.V(3).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) } return nil @@ -114,14 +114,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ Directory: dir, Name: name, } - // 
glog.V(1).Infof("lookup: %v", lookupRequest) + // log.V(2).Infof("lookup: %v", lookupRequest) if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if filer.ETag(resp.Entry) == filer.ETag(entry) { - glog.V(3).Infof("already replicated %s", key) + log.V(0).Infof("already replicated %s", key) return nil } if resp.Entry.Attributes != nil && resp.Entry.Attributes.Mtime >= entry.Attributes.Mtime { - glog.V(3).Infof("skip overwriting %s", key) + log.V(0).Infof("skip overwriting %s", key) return nil } } @@ -130,11 +130,11 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ if err != nil { // only warning here since the source chunk may have been deleted already - glog.Warningf("replicate entry chunks %s: %v", key, err) + log.Warningf("replicate entry chunks %s: %v", key, err) return nil } - // glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks) + // log.V(-1).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks) request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -151,9 +151,9 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [ Signatures: signatures, } - glog.V(3).Infof("create: %v", request) + log.V(0).Infof("create: %v", request) if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("create entry %s: %v", key, err) + log.V(3).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -174,10 +174,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent Name: name, } - glog.V(4).Infof("lookup entry: %v", request) + log.V(-1).Infof("lookup entry: %v", request) resp, err := filer_pb.LookupEntry(client, request) if err != nil { - glog.V(0).Infof("lookup %s: %v", key, err) + log.V(3).Infof("lookup %s: %v", key, err) return err } @@ -190,12 +190,12 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent return false, fmt.Errorf("lookup %s: %v", key, err) } - glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) + log.V(-1).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime { // skip if already changed // this usually happens when the messages are not ordered - glog.V(2).Infof("late updates %s", key) + log.V(1).Infof("late updates %s", key) } else { // find out what changed deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry) @@ -212,7 +212,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent // replicate the chunks that are new in the source replicatedChunks, err := fs.replicateChunks(newChunks, key) if err != nil { - glog.Warningf("replicate entry chunks %s: %v", key, err) + log.Warningf("replicate entry chunks %s: %v", key, err) return true, nil } existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...) 
diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index db6ea4aec..f820ae9a6 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -10,7 +10,7 @@ import ( "google.golang.org/api/option" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/sink" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -63,12 +63,12 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str var found bool google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS") if !found { - glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") + log.Fatalf("need to specify GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { - glog.Fatalf("Failed to create client: %v", err) + log.Fatalf("Failed to create client: %v", err) } g.client = client diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go index c6dddb80a..fd6745b5d 100644 --- a/weed/replication/sink/localsink/local_sink.go +++ b/weed/replication/sink/localsink/local_sink.go @@ -2,7 +2,7 @@ package localsink import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/repl_util" "github.com/seaweedfs/seaweedfs/weed/replication/sink" @@ -45,7 +45,7 @@ func (localsink *LocalSink) initialize(dir string, isIncremental bool) error { func (localsink *LocalSink) Initialize(configuration util.Configuration, prefix string) error { dir := configuration.GetString(prefix + "directory") isIncremental := configuration.GetBool(prefix + "is_incremental") - glog.V(4).Infof("sink.local.directory: %v", dir) + log.V(-1).Infof("sink.local.directory: %v", dir) return localsink.initialize(dir, isIncremental) } @@ -61,9 +61,9 @@ func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeCh if localsink.isMultiPartEntry(key) { return nil } - glog.V(4).Infof("Delete Entry key: %s", key) + log.V(-1).Infof("Delete Entry key: %s", key) if err := os.Remove(key); err != nil { - glog.V(0).Infof("remove entry key %s: %s", key, err) + log.V(3).Infof("remove entry key %s: %s", key, err) } return nil } @@ -72,7 +72,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa if entry.IsDirectory || localsink.isMultiPartEntry(key) { return nil } - glog.V(4).Infof("Create Entry key: %s", key) + log.V(-1).Infof("Create Entry key: %s", key) totalSize := filer.FileSize(entry) chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.GetChunks(), 0, int64(totalSize)) @@ -80,7 +80,7 @@ func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa dir := filepath.Dir(key) if _, err := os.Stat(dir); os.IsNotExist(err) { - glog.V(4).Infof("Create Directory key: %s", dir) + log.V(-1).Infof("Create Directory key: %s", dir) if err = os.MkdirAll(dir, 0755); err != nil { return err } @@ -102,7 +102,7 @@ func
(localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signa return err } if fi.Mode() != mode { - glog.V(4).Infof("Modify file mode: %o -> %o", fi.Mode(), mode) + log.V(-1).Infof("Modify file mode: %o -> %o", fi.Mode(), mode) if err := dstFile.Chmod(mode); err != nil { return err } @@ -128,7 +128,7 @@ func (localsink *LocalSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, ne if localsink.isMultiPartEntry(key) { return true, nil } - glog.V(4).Infof("Update Entry key: %s", key) + log.V(-1).Infof("Update Entry key: %s", key) // do delete and create foundExistingEntry = util.FileExists(key) err = localsink.CreateEntry(key, newEntry, signatures) diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 279108e16..9e74cf9c1 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -14,7 +14,7 @@ import ( "strings" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/replication/sink" "github.com/seaweedfs/seaweedfs/weed/replication/source" @@ -76,24 +76,24 @@ func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string s3sink.uploaderPartSizeMb = configuration.GetInt(prefix + "uploader_part_size") s3sink.uploaderConcurrency = configuration.GetInt(prefix + "uploader_concurrency") - glog.V(0).Infof("sink.s3.region: %v", s3sink.region) - glog.V(0).Infof("sink.s3.bucket: %v", s3sink.bucket) - glog.V(0).Infof("sink.s3.directory: %v", s3sink.dir) - glog.V(0).Infof("sink.s3.endpoint: %v", s3sink.endpoint) - glog.V(0).Infof("sink.s3.acl: %v", s3sink.acl) - glog.V(0).Infof("sink.s3.is_incremental: %v", s3sink.isIncremental) - glog.V(0).Infof("sink.s3.s3_disable_content_md5_validation: %v", s3sink.s3DisableContentMD5Validation) - glog.V(0).Infof("sink.s3.s3_force_path_style: %v", s3sink.s3ForcePathStyle) - glog.V(0).Infof("sink.s3.keep_part_size: %v", s3sink.keepPartSize) + log.V(3).Infof("sink.s3.region: %v", s3sink.region) + log.V(3).Infof("sink.s3.bucket: %v", s3sink.bucket) + log.V(3).Infof("sink.s3.directory: %v", s3sink.dir) + log.V(3).Infof("sink.s3.endpoint: %v", s3sink.endpoint) + log.V(3).Infof("sink.s3.acl: %v", s3sink.acl) + log.V(3).Infof("sink.s3.is_incremental: %v", s3sink.isIncremental) + log.V(3).Infof("sink.s3.s3_disable_content_md5_validation: %v", s3sink.s3DisableContentMD5Validation) + log.V(3).Infof("sink.s3.s3_force_path_style: %v", s3sink.s3ForcePathStyle) + log.V(3).Infof("sink.s3.keep_part_size: %v", s3sink.keepPartSize) if s3sink.uploaderMaxUploadParts > s3manager.MaxUploadParts { s3sink.uploaderMaxUploadParts = s3manager.MaxUploadParts - glog.Warningf("uploader_max_upload_parts is greater than the maximum number of parts allowed when uploading multiple parts to Amazon S3") - glog.V(0).Infof("sink.s3.uploader_max_upload_parts: %v => %v", s3sink.uploaderMaxUploadParts, s3manager.MaxUploadParts) + log.Warningf("uploader_max_upload_parts is greater than the maximum number of parts allowed when uploading multiple parts to Amazon S3") + log.V(3).Infof("sink.s3.uploader_max_upload_parts: %v => %v", s3sink.uploaderMaxUploadParts, s3manager.MaxUploadParts) } else { - glog.V(0).Infof("sink.s3.uploader_max_upload_parts: %v", s3sink.uploaderMaxUploadParts) + log.V(3).Infof("sink.s3.uploader_max_upload_parts: %v", s3sink.uploaderMaxUploadParts) } - glog.V(0).Infof("sink.s3.uploader_part_size_mb: 
%v", s3sink.uploaderPartSizeMb) - glog.V(0).Infof("sink.s3.uploader_concurrency: %v", s3sink.uploaderConcurrency) + log.V(3).Infof("sink.s3.uploader_part_size_mb: %v", s3sink.uploaderPartSizeMb) + log.V(3).Infof("sink.s3.uploader_concurrency: %v", s3sink.uploaderConcurrency) return s3sink.initialize( configuration.GetString(prefix+"aws_access_key_id"), @@ -141,9 +141,9 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b result, err := s3sink.conn.DeleteObject(input) if err == nil { - glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) + log.V(1).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) } else { - glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) + log.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) } return err diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index 768e251a4..f06b56fec 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -12,7 +12,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" @@ -76,14 +76,14 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) }) if err != nil { - glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err) + log.V(2).Infof("LookupFileId volume id %s: %v", vid, err) return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err) } locations := vid2Locations[vid] if locations == nil || len(locations.Locations) == 0 { - glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err) + log.V(2).Infof("LookupFileId locate volume id %s: %v", vid, err) return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) } @@ -118,7 +118,7 @@ func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Hea for _, fileUrl := range fileUrls { filename, header, resp, err = util_http.DownloadFile(fileUrl, "") if err != nil { - glog.V(1).Infof("fail to read from %s: %v", fileUrl, err) + log.V(2).Infof("fail to read from %s: %v", fileUrl, err) } else { break } diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index 7fc5c3f46..0456961ae 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/sqs" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" @@ -28,8 +28,8 @@ func (k *AwsSqsInput) GetName() string { } func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) + log.V(3).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + log.V(3).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", 
configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( configuration.GetString(prefix+"aws_access_key_id"), configuration.GetString(prefix+"aws_secret_access_key"), @@ -110,7 +110,7 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif }) if err != nil { - glog.V(1).Infof("delete message from sqs %s: %v", k.queueUrl, err) + log.V(2).Infof("delete message from sqs %s: %v", k.queueUrl, err) } return diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index 2e7640af4..bfeb0ebb8 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -6,7 +6,7 @@ package sub import ( "context" amqp "github.com/rabbitmq/amqp091-go" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "gocloud.dev/pubsub" @@ -41,38 +41,38 @@ func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl str queueNameDLX := "DLX." + queueName ch, err := conn.Channel() if err != nil { - glog.Error(err) + log.Error(err) return err } defer ch.Close() if err := ch.ExchangeDeclare( exchangeNameDLX, "fanout", true, false, false, false, nil); err != nil { - glog.Error(err) + log.Error(err) return err } if err := ch.ExchangeDeclare( exchangeName, "fanout", true, false, false, false, nil); err != nil { - glog.Error(err) + log.Error(err) return err } if _, err := ch.QueueDeclare( queueName, true, false, false, false, amqp.Table{"x-dead-letter-exchange": exchangeNameDLX}); err != nil { - glog.Error(err) + log.Error(err) return err } if err := ch.QueueBind(queueName, "", exchangeName, false, nil); err != nil { - glog.Error(err) + log.Error(err) return err } if _, err := ch.QueueDeclare( queueNameDLX, true, false, false, false, amqp.Table{"x-dead-letter-exchange": exchangeName, "x-message-ttl": 600000}); err != nil { - glog.Error(err) + log.Error(err) return err } if err := ch.QueueBind(queueNameDLX, "", exchangeNameDLX, false, nil); err != nil { - glog.Error(err) + log.Error(err) return err } return nil @@ -90,7 +90,7 @@ func (k *GoCDKPubSubInput) GetName() string { func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { topicUrl := configuration.GetString(prefix + "topic_url") k.subURL = configuration.GetString(prefix + "sub_url") - glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL) + log.V(3).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL) sub, err := pubsub.OpenSubscription(context.Background(), k.subURL) if err != nil { return err @@ -127,7 +127,7 @@ func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.Event k.sub.Shutdown(ctx) conn, err = amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) if err != nil { - glog.Error(err) + log.Error(err) time.Sleep(time.Second) return } @@ -135,7 +135,7 @@ func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.Event return } // This is permanent cached sub err - glog.Fatal(err) + log.Fatal(err) } onFailureFn = func() { if msg.Nackable() { @@ -143,11 +143,11 @@ func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.Event var delivery amqp.Delivery if msg.As(&delivery) { isRedelivered = delivery.Redelivered - glog.Warningf("onFailureFn() metadata: %+v, redelivered: %v", msg.Metadata, delivery.Redelivered) + log.Warningf("onFailureFn() metadata: %+v, redelivered: %v", 
msg.Metadata, delivery.Redelivered) } if isRedelivered { if err := delivery.Nack(false, false); err != nil { - glog.Error(err) + log.Error(err) } } else { msg.Nack() diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index c7509abf2..f431a2c1d 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -6,7 +6,7 @@ import ( "os" "cloud.google.com/go/pubsub" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/api/option" @@ -28,8 +28,8 @@ func (k *GooglePubSubInput) GetName() string { } func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) + log.V(3).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + log.V(3).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( configuration.GetString(prefix+"google_application_credentials"), configuration.GetString(prefix+"project_id"), @@ -45,13 +45,13 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId var found bool google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS") if !found { - glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml") + log.Fatalf("need to specify GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml") } } client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials)) if err != nil { - glog.Fatalf("Failed to create client: %v", err) + log.Fatalf("Failed to create client: %v", err) } k.topicName = topicName @@ -60,11 +60,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId if !exists { topic, err = client.CreateTopic(ctx, topicName) if err != nil { - glog.Fatalf("Failed to create topic %s: %v", topicName, err) + log.Fatalf("Failed to create topic %s: %v", topicName, err) } } } else { - glog.Fatalf("Failed to check topic %s: %v", topicName, err) + log.Fatalf("Failed to check topic %s: %v", topicName, err) } subscriptionName := "seaweedfs_sub" @@ -74,11 +74,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId if !exists { k.sub, err = client.CreateSubscription(ctx, subscriptionName, pubsub.SubscriptionConfig{Topic: topic}) if err != nil { - glog.Fatalf("Failed to create subscription %s: %v", subscriptionName, err) + log.Fatalf("Failed to create subscription %s: %v", subscriptionName, err) } } } else { - glog.Fatalf("Failed to check subscription %s: %v", topicName, err) + log.Fatalf("Failed to check subscription %s: %v", subscriptionName, err) } k.messageChan = make(chan *pubsub.Message, 1) diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 92f7ce609..4738b5612 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -8,7 +8,7 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/seaweedfs/seaweedfs/weed/glog" +
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/protobuf/proto" @@ -29,8 +29,8 @@ func (k *KafkaInput) GetName() string { } func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) + log.V(3).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + log.V(3).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( configuration.GetStringSlice(prefix+"hosts"), configuration.GetString(prefix+"topic"), @@ -46,7 +46,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, if err != nil { panic(err) } else { - glog.V(0).Infof("connected to %v", hosts) + log.V(3).Infof("connected to %v", hosts) } k.topic = topic @@ -87,7 +87,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, case msg := <-partitionConsumer.Messages(): k.messageChan <- msg if err := progress.setOffset(msg.Partition, msg.Offset); err != nil { - glog.Warningf("set kafka offset: %v", err) + log.Warningf("set kafka offset: %v", err) } } } @@ -121,12 +121,12 @@ func loadProgress(offsetFile string) *KafkaProgress { progress := &KafkaProgress{} data, err := os.ReadFile(offsetFile) if err != nil { - glog.Warningf("failed to read kafka progress file: %s", offsetFile) + log.Warningf("failed to read kafka progress file: %s", offsetFile) return nil } err = json.Unmarshal(data, progress) if err != nil { - glog.Warningf("failed to read kafka progress message: %s", string(data)) + log.Warningf("failed to read kafka progress message: %s", string(data)) return nil } return progress diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index 1fb118d6f..a48576a7d 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/iam_pb" @@ -121,14 +121,14 @@ func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManag } if option.Config != "" { - glog.V(3).Infof("loading static config file %s", option.Config) + log.V(0).Infof("loading static config file %s", option.Config) if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { - glog.Fatalf("fail to load config file %s: %v", option.Config, err) + log.Fatalf("fail to load config file %s: %v", option.Config, err) } } else { - glog.V(3).Infof("no static config file specified... loading config from filer %s", option.Filer) + log.V(0).Infof("no static config file specified... 
loading config from filer %s", option.Filer) if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil { - glog.Warningf("fail to load config: %v", err) + log.Warningf("fail to load config: %v", err) } } return iam @@ -137,7 +137,7 @@ func NewIdentityAccessManagement(option *S3ApiServerOption) *IdentityAccessManag func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) (err error) { var content []byte err = pb.WithFilerClient(false, 0, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - glog.V(3).Infof("loading config %s from filer %s", filer.IamConfigDirectory+"/"+filer.IamIdentityFile, option.Filer) + log.V(0).Infof("loading config %s from filer %s", filer.IamConfigDirectory+"/"+filer.IamIdentityFile, option.Filer) content, err = filer.ReadInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile) return err }) @@ -150,7 +150,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3A func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { content, readErr := os.ReadFile(fileName) if readErr != nil { - glog.Warningf("fail to read %s : %v", fileName, readErr) + log.Warningf("fail to read %s : %v", fileName, readErr) return fmt.Errorf("fail to read %s : %v", fileName, readErr) } return iam.LoadS3ApiConfigurationFromBytes(content) @@ -159,7 +159,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName str func (iam *IdentityAccessManagement) LoadS3ApiConfigurationFromBytes(content []byte) error { s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil { - glog.Warningf("unmarshal error: %v", err) + log.Warningf("unmarshal error: %v", err) return fmt.Errorf("unmarshal error: %v", err) } @@ -183,7 +183,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api foundAccountAnonymous := false for _, account := range config.Accounts { - glog.V(3).Infof("loading account name=%s, id=%s", account.DisplayName, account.Id) + log.V(0).Infof("loading account name=%s, id=%s", account.DisplayName, account.Id) switch account.Id { case AccountAdmin.Id: AccountAdmin = Account{ @@ -222,7 +222,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api emailAccount[AccountAnonymous.EmailAddress] = &AccountAnonymous } for _, ident := range config.Identities { - glog.V(3).Infof("loading identity %s", ident.Name) + log.V(0).Infof("loading identity %s", ident.Name) t := &Identity{ Name: ident.Name, Credentials: nil, @@ -239,7 +239,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3Api t.Account = account } else { t.Account = &AccountAdmin - glog.Warningf("identity %s is associated with a non exist account ID, the association is invalid", ident.Name) + log.Warningf("identity %s is associated with a nonexistent account ID, the association is invalid", ident.Name) } } @@ -285,7 +285,7 @@ func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identi } } } - glog.V(1).Infof("could not find accessKey %s", accessKey) + log.V(2).Infof("could not find accessKey %s", accessKey) return nil, nil, false } @@ -324,7 +324,7 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt } identity, errCode := iam.authRequest(r, action) - glog.V(3).Infof("auth error: %v", errCode) + log.V(0).Infof("auth error: %v", errCode) if errCode == s3err.ErrNone
{ if identity != nil && identity.Name != "" { r.Header.Set(s3_constants.AmzIdentityId, identity.Name) @@ -349,26 +349,26 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) var authType string switch getRequestAuthType(r) { case authTypeUnknown: - glog.V(3).Infof("unknown auth type") + log.V(0).Infof("unknown auth type") r.Header.Set(s3_constants.AmzAuthType, "Unknown") return identity, s3err.ErrAccessDenied case authTypePresignedV2, authTypeSignedV2: - glog.V(3).Infof("v2 auth type") + log.V(0).Infof("v2 auth type") identity, s3Err = iam.isReqAuthenticatedV2(r) authType = "SigV2" case authTypeStreamingSigned, authTypeSigned, authTypePresigned: - glog.V(3).Infof("v4 auth type") + log.V(0).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) authType = "SigV4" case authTypePostPolicy: - glog.V(3).Infof("post policy auth type") + log.V(0).Infof("post policy auth type") r.Header.Set(s3_constants.AmzAuthType, "PostPolicy") return identity, s3err.ErrNone case authTypeStreamingUnsigned: - glog.V(3).Infof("unsigned streaming upload") + log.V(0).Infof("unsigned streaming upload") return identity, s3err.ErrNone case authTypeJWT: - glog.V(3).Infof("jwt auth type") + log.V(0).Infof("jwt auth type") r.Header.Set(s3_constants.AmzAuthType, "Jwt") return identity, s3err.ErrNotImplemented case authTypeAnonymous: @@ -388,7 +388,7 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) return identity, s3Err } - glog.V(3).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action) + log.V(0).Infof("user name: %v actions: %v, action: %v", identity.Name, identity.Actions, action) bucket, object := s3_constants.GetBucketAndObject(r) prefix := s3_constants.GetPrefix(r) @@ -415,29 +415,29 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err var authType string switch getRequestAuthType(r) { case authTypeStreamingSigned: - glog.V(3).Infof("signed streaming upload") + log.V(0).Infof("signed streaming upload") return identity, s3err.ErrNone case authTypeStreamingUnsigned: - glog.V(3).Infof("unsigned streaming upload") + log.V(0).Infof("unsigned streaming upload") return identity, s3err.ErrNone case authTypeUnknown: - glog.V(3).Infof("unknown auth type") + log.V(0).Infof("unknown auth type") r.Header.Set(s3_constants.AmzAuthType, "Unknown") return identity, s3err.ErrAccessDenied case authTypePresignedV2, authTypeSignedV2: - glog.V(3).Infof("v2 auth type") + log.V(0).Infof("v2 auth type") identity, s3Err = iam.isReqAuthenticatedV2(r) authType = "SigV2" case authTypeSigned, authTypePresigned: - glog.V(3).Infof("v4 auth type") + log.V(0).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) authType = "SigV4" case authTypePostPolicy: - glog.V(3).Infof("post policy auth type") + log.V(0).Infof("post policy auth type") r.Header.Set(s3_constants.AmzAuthType, "PostPolicy") return identity, s3err.ErrNone case authTypeJWT: - glog.V(3).Infof("jwt auth type") + log.V(0).Infof("jwt auth type") r.Header.Set(s3_constants.AmzAuthType, "Jwt") return identity, s3err.ErrNotImplemented case authTypeAnonymous: @@ -455,7 +455,7 @@ func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err r.Header.Set(s3_constants.AmzAuthType, authType) } - glog.V(3).Infof("auth error: %v", s3Err) + log.V(0).Infof("auth error: %v", s3Err) if s3Err != s3err.ErrNone { return identity, s3Err } @@ -476,10 +476,10 @@ func (identity *Identity) canDo(action Action, bucket string, objectKey 
string) } } if bucket == "" { - glog.V(3).Infof("identity %s is not allowed to perform action %s on %s -- bucket is empty", identity.Name, action, bucket+objectKey) + log.V(0).Infof("identity %s is not allowed to perform action %s on %s -- bucket is empty", identity.Name, action, bucket+objectKey) return false } - glog.V(3).Infof("checking if %s can perform %s on bucket '%s'", identity.Name, action, bucket+objectKey) + log.V(0).Infof("checking if %s can perform %s on bucket '%s'", identity.Name, action, bucket+objectKey) target := string(action) + ":" + bucket + objectKey adminTarget := s3_constants.ACTION_ADMIN + ":" + bucket + objectKey limitedByBucket := string(action) + ":" + bucket @@ -504,7 +504,7 @@ func (identity *Identity) canDo(action Action, bucket string, objectKey string) } } //log error - glog.V(3).Infof("identity %s is not allowed to perform action %s on %s", identity.Name, action, bucket+objectKey) + log.V(0).Infof("identity %s is not allowed to perform action %s on %s", identity.Name, action, bucket+objectKey) return false } diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go index 1f6b30312..b75d7fe76 100644 --- a/weed/s3api/auth_credentials_subscribe.go +++ b/weed/s3api/auth_credentials_subscribe.go @@ -2,7 +2,7 @@ package s3api import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" @@ -49,7 +49,7 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, lastTsNs int64, p metadataFollowOption.ClientEpoch++ return pb.WithFilerClientFollowMetadata(s3a, metadataFollowOption, processEventFn) }, func(err error) bool { - glog.V(0).Infof("iam follow metadata changes: %v", err) + log.V(3).Infof("iam follow metadata changes: %v", err) return true }) } @@ -60,7 +60,7 @@ func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) if err := s3a.iam.LoadS3ApiConfigurationFromBytes(content); err != nil { return err } - glog.V(0).Infof("updated %s/%s", dir, filename) + log.V(3).Infof("updated %s/%s", dir, filename) } return nil } @@ -71,7 +71,7 @@ func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, conte if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil { return err } - glog.V(0).Infof("updated %s/%s", dir, filename) + log.V(3).Infof("updated %s/%s", dir, filename) } return nil } @@ -81,10 +81,10 @@ func (s3a *S3ApiServer) onBucketMetadataChange(dir string, oldEntry *filer_pb.En if dir == s3a.option.BucketsPath { if newEntry != nil { s3a.bucketRegistry.LoadBucketMetadata(newEntry) - glog.V(0).Infof("updated bucketMetadata %s/%s", dir, newEntry) + log.V(3).Infof("updated bucketMetadata %s/%s", dir, newEntry) } else { s3a.bucketRegistry.RemoveBucketMetadata(oldEntry) - glog.V(0).Infof("remove bucketMetadata %s/%s", dir, newEntry) + log.V(3).Infof("remove bucketMetadata %s/%s", dir, newEntry) } } return nil diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index f3b2720ee..2c9345cf7 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -37,7 +37,7 @@ import ( "time" "unicode/utf8" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" ) @@ -169,7 +169,7 @@ func (iam *IdentityAccessManagement) 
doesSignatureMatch(hashedPayload string, r // Trying with prefix before main path. // Get canonical request. - glog.V(4).Infof("Forwarded Prefix: %s", forwardedPrefix) + log.V(-1).Infof("Forwarded Prefix: %s", forwardedPrefix) canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, forwardedPrefix+req.URL.Path, req.Method) errCode = iam.genAndCompareSignatureV4(canonicalRequest, cred.SecretKey, t, signV4Values) @@ -193,7 +193,7 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r func (iam *IdentityAccessManagement) genAndCompareSignatureV4(canonicalRequest, secretKey string, t time.Time, signV4Values signValues) s3err.ErrorCode { // Get string to sign from canonical request. stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) - glog.V(4).Infof("String to Sign:\n%s", stringToSign) + log.V(-1).Infof("String to Sign:\n%s", stringToSign) // Calculate signature. newSignature := iam.getSignature( secretKey, @@ -202,7 +202,7 @@ func (iam *IdentityAccessManagement) genAndCompareSignatureV4(canonicalRequest, signV4Values.Credential.scope.service, stringToSign, ) - glog.V(4).Infof("Signature:\n%s", newSignature) + log.V(-1).Infof("Signature:\n%s", newSignature) // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { @@ -805,7 +805,7 @@ func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, payload, }, "\n") - glog.V(4).Infof("Canonical Request:\n%s", canonicalRequest) + log.V(-1).Infof("Canonical Request:\n%s", canonicalRequest) return canonicalRequest } diff --git a/weed/s3api/bucket_metadata.go b/weed/s3api/bucket_metadata.go index d1f487104..e10ce709f 100644 --- a/weed/s3api/bucket_metadata.go +++ b/weed/s3api/bucket_metadata.go @@ -3,7 +3,7 @@ package s3api import ( "encoding/json" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" @@ -57,7 +57,7 @@ func NewBucketRegistry(s3a *S3ApiServer) *BucketRegistry { } err := br.init() if err != nil { - glog.Fatal("init bucket registry failed", err) + log.Fatal("init bucket registry failed", err) return nil } return br @@ -80,7 +80,7 @@ func (r *BucketRegistry) LoadBucketMetadata(entry *filer_pb.Entry) { func buildBucketMetadata(accountManager AccountManager, entry *filer_pb.Entry) *BucketMetaData { entryJson, _ := json.Marshal(entry) - glog.V(3).Infof("build bucket metadata,entry=%s", entryJson) + log.V(0).Infof("build bucket metadata,entry=%s", entryJson) bucketMetadata := &BucketMetaData{ Name: entry.Name, @@ -102,7 +102,7 @@ func buildBucketMetadata(accountManager AccountManager, entry *filer_pb.Entry) * if valid { bucketMetadata.ObjectOwnership = ownership } else { - glog.Warningf("Invalid ownership: %s, bucket: %s", ownership, bucketMetadata.Name) + log.Warningf("Invalid ownership: %s, bucket: %s", ownership, bucketMetadata.Name) } } @@ -113,7 +113,7 @@ func buildBucketMetadata(accountManager AccountManager, entry *filer_pb.Entry) * ownerAccountId := string(acpOwnerBytes) ownerAccountName := accountManager.GetAccountNameById(ownerAccountId) if ownerAccountName == "" { - glog.Warningf("owner[id=%s] is invalid, bucket: %s", ownerAccountId, bucketMetadata.Name) + log.Warningf("owner[id=%s] is invalid, bucket: %s", ownerAccountId, bucketMetadata.Name) } else 
{ bucketMetadata.Owner = &s3.Owner{ ID: &ownerAccountId, @@ -129,7 +129,7 @@ func buildBucketMetadata(accountManager AccountManager, entry *filer_pb.Entry) * if err == nil { bucketMetadata.Acl = grants } else { - glog.Warningf("Unmarshal ACP grants: %s(%v), bucket: %s", string(acpGrantsBytes), err, bucketMetadata.Name) + log.Warningf("Unmarshal ACP grants: %s(%v), bucket: %s", string(acpGrantsBytes), err, bucketMetadata.Name) } } } diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index d8fdc7525..d095c5f16 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -33,7 +33,7 @@ import ( "net/http" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" @@ -65,9 +65,9 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr switch contentSha256Header { // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' case streamingContentSHA256: - glog.V(3).Infof("streaming content sha256") + log.V(0).Infof("streaming content sha256") case streamingUnsignedPayload: - glog.V(3).Infof("streaming unsigned payload") + log.V(0).Infof("streaming unsigned payload") default: return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch } @@ -148,7 +148,7 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // out of HTTP "chunked" format before returning it. // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. func (iam *IdentityAccessManagement) newChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) { - glog.V(3).Infof("creating a new newSignV4ChunkedReader") + log.V(0).Infof("creating a new newSignV4ChunkedReader") contentSha256Header := req.Header.Get("X-Amz-Content-Sha256") authorizationHeader := req.Header.Get("Authorization") @@ -161,13 +161,13 @@ func (iam *IdentityAccessManagement) newChunkedReader(req *http.Request) (io.Rea switch contentSha256Header { // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' case streamingContentSHA256: - glog.V(3).Infof("streaming content sha256") + log.V(0).Infof("streaming content sha256") ident, seedSignature, region, seedDate, errCode = iam.calculateSeedSignature(req) if errCode != s3err.ErrNone { return nil, errCode } case streamingUnsignedPayload: - glog.V(3).Infof("streaming unsigned payload") + log.V(0).Infof("streaming unsigned payload") if authorizationHeader != "" { // We do not need to pass the seed signature to the Reader as each chunk is not signed, // but we do compute it to verify the caller has the correct permissions. 
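A note on API shape: these rewrites compile only if util/log exposes the same guard-style interface as glog, including the level-less Infof/Warningf/Errorf/Fatalf helpers used elsewhere in this diff. A hypothetical sketch of the minimal surface the call sites assume (names and the gating rule are inferred from usage here, not copied from the actual util/log package):

    package log

    // threshold is the configured minimum level; with the inverted scale used
    // in this change, -1 is the most verbose level and 3 the most important.
    var threshold = 0 // assumed default, set by configuration in practice

    type Verbose bool

    // V returns a guard that is enabled when the requested level is at or
    // above the configured threshold.
    func V(level int) Verbose {
        return Verbose(level >= threshold)
    }

    // Infof logs only when the guard is enabled.
    func (v Verbose) Infof(format string, args ...interface{}) {
        if v {
            // forward to the underlying leveled logger
        }
    }
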
@@ -183,7 +183,7 @@ func (iam *IdentityAccessManagement) newChunkedReader(req *http.Request) (io.Rea checksumAlgorithm, err := extractChecksumAlgorithm(amzTrailerHeader) if err != nil { - glog.V(3).Infof("error extracting checksum algorithm: %v", err) + log.V(0).Infof("error extracting checksum algorithm: %v", err) return nil, s3err.ErrInvalidRequest } @@ -378,7 +378,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { if extractedCheckSumAlgorithm.String() != cr.checkSumAlgorithm { errorMessage := fmt.Sprintf("checksum algorithm in trailer '%s' does not match the one advertised in the header '%s'", extractedCheckSumAlgorithm.String(), cr.checkSumAlgorithm) - glog.V(3).Info(errorMessage) + log.V(0).Info(errorMessage) cr.err = errors.New(errorMessage) return 0, cr.err } @@ -387,7 +387,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { base64Checksum := base64.StdEncoding.EncodeToString(computedChecksum) if string(extractedChecksum) != base64Checksum { // TODO: Return BadDigest - glog.V(3).Infof("payload checksum '%s' does not match provided checksum '%s'", base64Checksum, string(extractedChecksum)) + log.V(0).Infof("payload checksum '%s' does not match provided checksum '%s'", base64Checksum, string(extractedChecksum)) cr.err = errors.New("payload checksum does not match") return 0, cr.err } diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 3346fdf5f..377c7f43b 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -21,7 +21,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" ) @@ -37,7 +37,7 @@ type InitiateMultipartUploadResult struct { func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) { - glog.V(2).Infof("createMultipartUpload input %v", input) + log.V(1).Infof("createMultipartUpload input %v", input) uploadIdString := s3a.generateUploadID(*input.Key) @@ -55,7 +55,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp entry.Attributes.Mime = *input.ContentType } }); err != nil { - glog.Errorf("NewMultipartUpload error: %v", err) + log.Errorf("NewMultipartUpload error: %v", err) return nil, s3err.ErrInternalError } @@ -77,7 +77,7 @@ type CompleteMultipartUploadResult struct { func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) { - glog.V(2).Infof("completeMultipartUpload input %v", input) + log.V(1).Infof("completeMultipartUpload input %v", input) if len(parts.Parts) == 0 { stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() return nil, s3err.ErrNoSuchUpload @@ -95,7 +95,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList) if err != nil { - glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) + log.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) 
stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() return nil, s3err.ErrNoSuchUpload } @@ -120,7 +120,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId) if err != nil { - glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) + log.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc() return nil, s3err.ErrNoSuchUpload } @@ -129,14 +129,14 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa entityTooSmall := false for _, entry := range entries { foundEntry := false - glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name) + log.V(-1).Infof("completeMultipartUpload part entries %s", entry.Name) if entry.IsDirectory || !strings.HasSuffix(entry.Name, multipartExt) { continue } partNumber, err := parsePartNumber(entry.Name) if err != nil { stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNumber).Inc() - glog.Errorf("completeMultipartUpload failed to pasre partNumber %s:%s", entry.Name, err) + log.Errorf("completeMultipartUpload failed to parse partNumber %s:%s", entry.Name, err) continue } completedPartsByNumber, ok := completedPartMap[partNumber] @@ -148,16 +148,16 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa entryETag := hex.EncodeToString(entry.Attributes.GetMd5()) if partETag != "" && len(partETag) == 32 && entryETag != "" { if entryETag != partETag { - glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag) + log.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag) stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagMismatch).Inc() continue } } else { - glog.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag) + log.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag) stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagInvalid).Inc() } if len(entry.Chunks) == 0 { - glog.Warningf("completeMultipartUpload %s empty chunks", entry.Name) + log.Warningf("completeMultipartUpload %s empty chunks", entry.Name) stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEmpty).Inc() continue } @@ -168,7 +168,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa if foundEntry { if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] && entry.Attributes.FileSize < multiPartMinSize { - glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name) + log.Warningf("completeMultipartUpload %s part file size less than 5mb", entry.Name) entityTooSmall = true } } else { @@ -185,7 +185,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa for _, partNumber := range completedPartNumbers { partEntriesByNumber, ok := partEntries[partNumber] if !ok { - glog.Errorf("part %d has no entry", partNumber) + log.Errorf("part %d has no entry", partNumber) stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNotFound).Inc() return nil, s3err.ErrInvalidPart } @@ -238,7 +238,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa }) if err != nil { - glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName,
entryName, err) + log.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) return nil, s3err.ErrInternalError } @@ -253,13 +253,13 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa for _, deleteEntry := range deleteEntries { //delete unused part data - glog.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name) + log.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name) if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil { - glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err) + log.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err) } } if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { - glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) + log.V(2).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return @@ -296,18 +296,18 @@ func parsePartNumber(fileName string) (int, error) { func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) { - glog.V(2).Infof("abortMultipartUpload input %v", input) + log.V(1).Infof("abortMultipartUpload input %v", input) exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { - glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) + log.V(2).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, s3err.ErrNoSuchUpload } if exists { err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true) } if err != nil { - glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) + log.V(2).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, s3err.ErrInternalError } @@ -334,7 +334,7 @@ type ListMultipartUploadsResult struct { func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html - glog.V(2).Infof("listMultipartUploads input %v", input) + log.V(1).Infof("listMultipartUploads input %v", input) output = &ListMultipartUploadsResult{ Bucket: input.Bucket, @@ -348,7 +348,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32) if err != nil { - glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) + log.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return } @@ -396,7 +396,7 @@ type ListPartsResult struct { func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html - glog.V(2).Infof("listObjectParts input %v", input) + log.V(1).Infof("listObjectParts input %v", input) output = &ListPartsResult{ Bucket: input.Bucket, @@ -409,7 +409,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP entries, isLast, err := 
s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d%s", *input.PartNumberMarker, multipartExt), false, uint32(*input.MaxParts)) if err != nil { - glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) + log.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, s3err.ErrNoSuchUpload } @@ -422,7 +422,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP if strings.HasSuffix(entry.Name, multipartExt) && !entry.IsDirectory { partNumber, err := parsePartNumber(entry.Name) if err != nil { - glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err) + log.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err) continue } output.Part = append(output.Part, &s3.Part{ diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 8ae8f780a..0643c1aa4 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,7 +3,7 @@ package s3api import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "strings" @@ -62,9 +62,9 @@ func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath strin IgnoreRecursiveError: true, } - glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) + log.V(2).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) if resp, err := client.DeleteEntry(context.Background(), request); err != nil { - glog.V(0).Infof("delete entry %v: %v", request, err) + log.V(3).Infof("delete entry %v: %v", request, err) return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) } else { if resp.Error != "" { diff --git a/weed/s3api/s3api_acl_helper.go b/weed/s3api/s3api_acl_helper.go index f036a9ea7..3b1555625 100644 --- a/weed/s3api/s3api_acl_helper.go +++ b/weed/s3api/s3api_acl_helper.go @@ -5,7 +5,7 @@ import ( "encoding/xml" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" @@ -42,7 +42,7 @@ func ExtractAcl(r *http.Request, accountManager AccountManager, ownership, bucke //owner should present && owner is immutable if *acp.Owner.ID != ownerId { - glog.V(3).Infof("set acl denied! owner account is not consistent, request account id: %s, expect account id: %s", accountId, ownerId) + log.V(0).Infof("set acl denied! owner account is not consistent, request account id: %s, expect account id: %s", accountId, ownerId) return nil, s3err.ErrAccessDenied } @@ -266,41 +266,41 @@ func ValidateAndTransferGrants(accountManager AccountManager, grants []*s3.Grant for _, grant := range grants { grantee := grant.Grantee if grantee == nil || grantee.Type == nil { - glog.Warning("invalid grantee! grantee or granteeType is nil") + log.Warning("invalid grantee! grantee or granteeType is nil") return nil, s3err.ErrInvalidRequest } switch *grantee.Type { case s3_constants.GrantTypeGroup: if grantee.URI == nil { - glog.Warning("invalid group grantee! group URI is nil") + log.Warning("invalid group grantee! 
group URI is nil") return nil, s3err.ErrInvalidRequest } ok := s3_constants.ValidateGroup(*grantee.URI) if !ok { - glog.Warningf("invalid group grantee! group name[%s] is not valid", *grantee.URI) + log.Warningf("invalid group grantee! group name[%s] is not valid", *grantee.URI) return nil, s3err.ErrInvalidRequest } result = append(result, grant) case s3_constants.GrantTypeCanonicalUser: if grantee.ID == nil { - glog.Warning("invalid canonical grantee! account id is nil") + log.Warning("invalid canonical grantee! account id is nil") return nil, s3err.ErrInvalidRequest } name := accountManager.GetAccountNameById(*grantee.ID) if len(name) == 0 { - glog.Warningf("invalid canonical grantee! account id[%s] is not exists", *grantee.ID) + log.Warningf("invalid canonical grantee! account id[%s] is not exists", *grantee.ID) return nil, s3err.ErrInvalidRequest } result = append(result, grant) case s3_constants.GrantTypeAmazonCustomerByEmail: if grantee.EmailAddress == nil { - glog.Warning("invalid email grantee! email address is nil") + log.Warning("invalid email grantee! email address is nil") return nil, s3err.ErrInvalidRequest } accountId := accountManager.GetAccountIdByEmail(*grantee.EmailAddress) if len(accountId) == 0 { - glog.Warningf("invalid email grantee! email address[%s] is not exists", *grantee.EmailAddress) + log.Warningf("invalid email grantee! email address[%s] is not exists", *grantee.EmailAddress) return nil, s3err.ErrInvalidRequest } result = append(result, &s3.Grant{ @@ -389,7 +389,7 @@ func SetAcpGrantsHeader(r *http.Request, acpGrants []*s3.Grant) { if err == nil { r.Header.Set(s3_constants.ExtAmzAclKey, string(a)) } else { - glog.Warning("Marshal acp grants err", err) + log.Warning("Marshal acp grants err", err) } } } @@ -422,7 +422,7 @@ func AssembleEntryWithAcp(objectEntry *filer_pb.Entry, objectOwner string, grant if len(grants) > 0 { grantsBytes, err := json.Marshal(grants) if err != nil { - glog.Warning("assemble acp to entry:", err) + log.Warning("assemble acp to entry:", err) return s3err.ErrInvalidRequest } objectEntry.Extended[s3_constants.ExtAmzAclKey] = grantsBytes diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 2f535d0d0..a95bc0583 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -23,14 +23,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" ) func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { - glog.V(3).Infof("ListBucketsHandler") + log.V(0).Infof("ListBucketsHandler") var identity *Identity var s3Err s3err.ErrorCode @@ -80,12 +80,12 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketHandler %s", bucket) + log.V(0).Infof("PutBucketHandler %s", bucket) // validate the bucket name err := s3bucket.VerifyS3BucketName(bucket) if err != nil { - glog.Errorf("put invalid bucket name: %v %v", bucket, err) + log.Errorf("put invalid bucket name: %v %v", bucket, err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidBucketName) return } @@ -97,7 +97,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) 
IncludeEcVolumes: true, IncludeNormalVolumes: true, }); err != nil { - glog.Errorf("list collection: %v", err) + log.Errorf("list collection: %v", err) return fmt.Errorf("list collections: %v", err) } else { for _, c := range resp.Collections { @@ -131,7 +131,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) // create the folder for bucket, but lazily create actual collection if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil { - glog.Errorf("PutBucketHandler mkdir: %v", err) + log.Errorf("PutBucketHandler mkdir: %v", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -142,7 +142,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteBucketHandler %s", bucket) + log.V(0).Infof("DeleteBucketHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -167,7 +167,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque Collection: s3a.getCollectionName(bucket), } - glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) + log.V(2).Infof("delete collection: %v", deleteCollectionRequest) if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } @@ -197,7 +197,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("HeadBucketHandler %s", bucket) + log.V(0).Infof("HeadBucketHandler %s", bucket) if entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket); entry == nil || err == filer_pb.ErrNotFound { s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchBucket) @@ -235,7 +235,7 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { identityId := r.Header.Get(s3_constants.AmzIdentityId) if id, ok := entry.Extended[s3_constants.AmzIdentityId]; ok { if identityId != string(id) { - glog.V(3).Infof("hasAccess: %s != %s (entry.Extended = %v)", identityId, id, entry.Extended) + log.V(0).Infof("hasAccess: %s != %s (entry.Extended = %v)", identityId, id, entry.Extended) return false } } @@ -247,7 +247,7 @@ func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketAclHandler %s", bucket) + log.V(0).Infof("GetBucketAclHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -279,7 +279,7 @@ func (s3a *S3ApiServer) GetBucketAclHandler(w http.ResponseWriter, r *http.Reque func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketAclHandler %s", bucket) + log.V(0).Infof("PutBucketAclHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -290,7 +290,7 @@ func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Reque case cannedAcl == "": acl := &s3.AccessControlPolicy{} if err := 
xmlDecoder(r.Body, acl, r.ContentLength); err != nil { - glog.Errorf("PutBucketAclHandler: %s", err) + log.Errorf("PutBucketAclHandler: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest) return } @@ -310,7 +310,7 @@ func (s3a *S3ApiServer) PutBucketAclHandler(w http.ResponseWriter, r *http.Reque func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) { // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketLifecycleConfigurationHandler %s", bucket) + log.V(0).Infof("GetBucketLifecycleConfigurationHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -318,7 +318,7 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr } fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil) if err != nil { - glog.Errorf("GetBucketLifecycleConfigurationHandler: %s", err) + log.Errorf("GetBucketLifecycleConfigurationHandler: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -355,7 +355,7 @@ func (s3a *S3ApiServer) GetBucketLifecycleConfigurationHandler(w http.ResponseWr func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWriter, r *http.Request) { // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketLifecycleConfigurationHandler %s", bucket) + log.V(0).Infof("PutBucketLifecycleConfigurationHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -364,14 +364,14 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr lifeCycleConfig := Lifecycle{} if err := xmlDecoder(r.Body, &lifeCycleConfig, r.ContentLength); err != nil { - glog.Warningf("PutBucketLifecycleConfigurationHandler xml decode: %s", err) + log.Warningf("PutBucketLifecycleConfigurationHandler xml decode: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) return } fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil) if err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler read filer config: %s", err) + log.Errorf("PutBucketLifecycleConfigurationHandler read filer config: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -407,7 +407,7 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr continue } if err := fc.AddLocationConf(locConf); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler add location config: %s", err) + log.Errorf("PutBucketLifecycleConfigurationHandler add location config: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -417,13 +417,13 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr if changed { var buf bytes.Buffer if err := fc.ToText(&buf); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler save config to text: %s", err) + log.Errorf("PutBucketLifecycleConfigurationHandler save config to text: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) } if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes()) }); err != nil { - glog.Errorf("PutBucketLifecycleConfigurationHandler save config inside filer: %s", err) + log.Errorf("PutBucketLifecycleConfigurationHandler save config inside 
filer: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -437,7 +437,7 @@ func (s3a *S3ApiServer) PutBucketLifecycleConfigurationHandler(w http.ResponseWr func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) { // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteBucketLifecycleHandler %s", bucket) + log.V(0).Infof("DeleteBucketLifecycleHandler %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) @@ -446,7 +446,7 @@ fc, err := filer.ReadFilerConf(s3a.option.Filer, s3a.option.GrpcDialOption, nil) if err != nil { - glog.Errorf("DeleteBucketLifecycleHandler read filer config: %s", err) + log.Errorf("DeleteBucketLifecycleHandler read filer config: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -467,13 +467,13 @@ func (s3a *S3ApiServer) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *h if changed { var buf bytes.Buffer if err := fc.ToText(&buf); err != nil { - glog.Errorf("DeleteBucketLifecycleHandler save config to text: %s", err) + log.Errorf("DeleteBucketLifecycleHandler save config to text: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) } if err := s3a.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { return filer.SaveInsideFiler(client, filer.DirectoryEtcSeaweedFS, filer.FilerConfName, buf.Bytes()) }); err != nil { - glog.Errorf("DeleteBucketLifecycleHandler save config inside filer: %s", err) + log.Errorf("DeleteBucketLifecycleHandler save config inside filer: %s", err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -504,7 +504,7 @@ func (s3a *S3ApiServer) GetBucketRequestPaymentHandler(w http.ResponseWriter, r // PutBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketOwnershipControls.html func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketOwnershipControls %s", bucket) + log.V(0).Infof("PutBucketOwnershipControls %s", bucket) errCode := s3a.checkAccessByOwnership(r, bucket) if errCode != s3err.ErrNone { @@ -579,7 +579,7 @@ func (s3a *S3ApiServer) PutBucketOwnershipControls(w http.ResponseWriter, r *htt // GetBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketOwnershipControls.html func (s3a *S3ApiServer) GetBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketOwnershipControls %s", bucket) + log.V(0).Infof("GetBucketOwnershipControls %s", bucket) errCode := s3a.checkAccessByOwnership(r, bucket) if errCode != s3err.ErrNone { @@ -620,7 +620,7 @@ func (s3a *S3ApiServer) GetBucketOwnershipControls(w http.ResponseWriter, r *htt // DeleteBucketOwnershipControls https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketOwnershipControls.html func (s3a *S3ApiServer) DeleteBucketOwnershipControls(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutBucketOwnershipControls %s", bucket) + log.V(0).Infof("DeleteBucketOwnershipControls %s", bucket) errCode := s3a.checkAccessByOwnership(r, bucket) if errCode != s3err.ErrNone { @@ -661,7 +661,7 @@ func (s3a *S3ApiServer) DeleteBucketOwnershipControls(w http.ResponseWriter, r * //
https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (s3a *S3ApiServer) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketVersioning %s", bucket) + log.V(0).Infof("GetBucketVersioning %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) diff --git a/weed/s3api/s3api_bucket_skip_handlers.go b/weed/s3api/s3api_bucket_skip_handlers.go index 549eaa8ce..7e43e5379 100644 --- a/weed/s3api/s3api_bucket_skip_handlers.go +++ b/weed/s3api/s3api_bucket_skip_handlers.go @@ -1,7 +1,7 @@ package s3api import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "net/http" @@ -54,7 +54,7 @@ func (s3a *S3ApiServer) PutBucketVersioningHandler(w http.ResponseWriter, r *htt // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html func (s3a *S3ApiServer) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetBucketTagging %s", bucket) + log.V(0).Infof("GetBucketTagging %s", bucket) if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { s3err.WriteErrorResponse(w, r, err) diff --git a/weed/s3api/s3api_circuit_breaker.go b/weed/s3api/s3api_circuit_breaker.go index 6e14da0af..86f391f1a 100644 --- a/weed/s3api/s3api_circuit_breaker.go +++ b/weed/s3api/s3api_circuit_breaker.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/gorilla/mux" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" @@ -32,7 +32,7 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { err := pb.WithFilerClient(false, 0, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile) if errors.Is(err, filer_pb.ErrNotFound) { - glog.Infof("s3 circuit breaker not configured") + log.Infof("s3 circuit breaker not configured") return nil } if err != nil { @@ -42,7 +42,7 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { }) if err != nil { - glog.Infof("s3 circuit breaker not configured correctly: %v", err) + log.Infof("s3 circuit breaker not configured correctly: %v", err) } return cb @@ -51,7 +51,7 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error { cbCfg := &s3_pb.S3CircuitBreakerConfig{} if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil { - glog.Warningf("unmarshal error: %v", err) + log.Warningf("unmarshal error: %v", err) return fmt.Errorf("unmarshal error: %v", err) } if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil { diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 8e5008219..660dca99c 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -15,7 +15,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/util/mem" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" 
util_http "github.com/seaweedfs/seaweedfs/weed/util/http" ) @@ -113,7 +113,7 @@ func (s3a *S3ApiServer) toFilerUrl(bucket, object string) string { func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectHandler %s %s", bucket, object) + log.V(0).Infof("GetObjectHandler %s %s", bucket, object) if strings.HasSuffix(r.URL.Path, "/") { s3err.WriteErrorResponse(w, r, s3err.ErrNotImplemented) @@ -128,7 +128,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("HeadObjectHandler %s %s", bucket, object) + log.V(0).Infof("HeadObjectHandler %s %s", bucket, object) destUrl := s3a.toFilerUrl(bucket, object) @@ -137,13 +137,13 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, isWrite bool, responseFn func(proxyResponse *http.Response, w http.ResponseWriter) (statusCode int, bytesTransferred int64)) { - glog.V(3).Infof("s3 proxying %s to %s", r.Method, destUrl) + log.V(0).Infof("s3 proxying %s to %s", r.Method, destUrl) start := time.Now() proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body) if err != nil { - glog.Errorf("NewRequest %s: %v", destUrl, err) + log.Errorf("NewRequest %s: %v", destUrl, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -171,7 +171,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des resp, postErr := s3a.client.Do(proxyReq) if postErr != nil { - glog.Errorf("post to filer: %v", postErr) + log.Errorf("post to filer: %v", postErr) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } @@ -263,7 +263,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) (s defer mem.Free(buf) bytesTransferred, err := io.CopyBuffer(w, proxyResponse.Body, buf) if err != nil { - glog.V(1).Infof("passthrough response read %d bytes: %v", bytesTransferred, err) + log.V(2).Infof("passthrough response read %d bytes: %v", bytesTransferred, err) } return statusCode, bytesTransferred } diff --git a/weed/s3api/s3api_object_handlers_copy.go b/weed/s3api/s3api_object_handlers_copy.go index 4ca8010d2..1b9111cae 100644 --- a/weed/s3api/s3api_object_handlers_copy.go +++ b/weed/s3api/s3api_object_handlers_copy.go @@ -10,7 +10,7 @@ import ( "modernc.org/strutil" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/util" @@ -35,7 +35,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) - glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject) + log.V(0).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject) replaceMeta, replaceTagging := replaceDirective(r.Header) @@ -50,7 +50,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging) entry.Attributes.Mtime = time.Now().Unix() if err != nil { - glog.Errorf("CopyObjectHandler ValidateTags 
error %s: %v", r.URL, err) + log.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag) return } @@ -100,7 +100,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource) return } - glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + log.V(1).Infof("copy from %s to %s", srcUrl, dstUrl) destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject) etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination, dstBucket) @@ -162,7 +162,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req return } - glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID) + log.V(0).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID) // check partID with maximum part ID for multipart objects if partID > globalMaxPartID { @@ -184,7 +184,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req defer util_http.CloseResponse(resp) defer dataReader.Close() - glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + log.V(1).Infof("copy from %s to %s", srcUrl, dstUrl) destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject) etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination, dstBucket) diff --git a/weed/s3api/s3api_object_handlers_delete.go b/weed/s3api/s3api_object_handlers_delete.go index 802e82b5f..4763b4694 100644 --- a/weed/s3api/s3api_object_handlers_delete.go +++ b/weed/s3api/s3api_object_handlers_delete.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/util" @@ -27,7 +27,7 @@ const ( func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteObjectHandler %s %s", bucket, object) + log.V(0).Infof("DeleteObjectHandler %s %s", bucket, object) target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) dir, name := target.DirAndName() @@ -109,7 +109,7 @@ type DeleteObjectsResponse struct { func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteMultipleObjectsHandler %s", bucket) + log.V(0).Infof("DeleteMultipleObjectsHandler %s", bucket) deleteXMLBytes, err := io.ReadAll(r.Body) if err != nil { @@ -211,7 +211,7 @@ func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerCli continue } if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil { - glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) + log.V(-1).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) } else { newDirectoriesWithDeletion[parentDir]++ } diff --git a/weed/s3api/s3api_object_handlers_list.go b/weed/s3api/s3api_object_handlers_list.go index 6a4740fef..bc4415dbf 100644 --- a/weed/s3api/s3api_object_handlers_list.go +++ b/weed/s3api/s3api_object_handlers_list.go @@ -5,7 +5,7 @@ import ( "encoding/xml" "fmt" 
"github.com/aws/aws-sdk-go/service/s3" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" @@ -50,7 +50,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("ListObjectsV2Handler %s", bucket) + log.V(0).Infof("ListObjectsV2Handler %s", bucket) originalPrefix, startAfter, delimiter, continuationToken, encodingTypeUrl, fetchOwner, maxKeys := getListObjectsV2Args(r.URL.Query()) @@ -104,7 +104,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ // collect parameters bucket, _ := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("ListObjectsV1Handler %s", bucket) + log.V(0).Infof("ListObjectsV1Handler %s", bucket) originalPrefix, marker, delimiter, encodingTypeUrl, maxKeys := getListObjectsV1Args(r.URL.Query()) @@ -312,7 +312,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d // invariants // prefix and marker should be under dir, marker may contain "/" // maxKeys should be updated for each recursion - // glog.V(4).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter) + // log.V(-1).Infof("doListFilerEntries dir: %s, prefix: %s, marker %s, maxKeys: %d, prefixEndsOnDelimiter: %+v", dir, prefix, marker, cursor.maxKeys, cursor.prefixEndsOnDelimiter) if prefix == "/" && delimiter == "/" { return } @@ -382,7 +382,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d } } if entry.IsDirectory { - // glog.V(4).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) + // log.V(-1).Infof("List Dir Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) if entry.Name == s3_constants.MultipartUploadsFolder { // FIXME no need to apply to all directories. 
this extra also affects maxKeys continue } @@ -410,7 +410,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d var isEmpty bool if !s3a.option.AllowEmptyFolder && entry.IsOlderDir() { //if isEmpty, err = s3a.ensureDirectoryAllEmpty(client, dir, entry.Name); err != nil { - // glog.Errorf("check empty folder %s: %v", dir, err) + // log.Errorf("check empty folder %s: %v", dir, err) //} } if !isEmpty { @@ -419,7 +419,7 @@ func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, d } } else { eachEntryFn(dir, entry) - // glog.V(4).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) + // log.V(-1).Infof("List File Entries %s, file: %s, maxKeys %d", dir, entry.Name, cursor.maxKeys) } if cursor.prefixEndsOnDelimiter { cursor.prefixEndsOnDelimiter = false @@ -462,8 +462,8 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) { // println("+ ensureDirectoryAllEmpty", dir, name) - glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name) - defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) + log.V(-1).Infof("+ isEmpty %s/%s", parentDir, name) + defer log.V(-1).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) var fileCounter int var subDirs []string currentDir := parentDir + "/" + name @@ -480,7 +480,7 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile } startFrom = entry.Name isExhausted = isExhausted || isLast - glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) + log.V(-1).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) return nil }, startFrom, false, 8) if !foundEntry { @@ -506,7 +506,7 @@ func (s3a *S3ApiServer) ensureDirectoryAllEmpty(filerClient filer_pb.SeaweedFile } } - glog.V(1).Infof("deleting empty folder %s", currentDir) + log.V(2).Infof("deleting empty folder %s", currentDir) if err = doDeleteEntry(filerClient, parentDir, name, true, false); err != nil { return } diff --git a/weed/s3api/s3api_object_handlers_multipart.go b/weed/s3api/s3api_object_handlers_multipart.go index dfd9f5844..472ce280a 100644 --- a/weed/s3api/s3api_object_handlers_multipart.go +++ b/weed/s3api/s3api_object_handlers_multipart.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" weed_server "github.com/seaweedfs/seaweedfs/weed/server" @@ -48,7 +48,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http } response, errCode := s3a.createMultipartUpload(createMultipartUploadInput) - glog.V(2).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) + log.V(1).Info("NewMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) if errCode != s3err.ErrNone { s3err.WriteErrorResponse(w, r, errCode) @@ -85,7 +85,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r UploadId: aws.String(uploadID), }, parts) - glog.V(2).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) + log.V(1).Info("CompleteMultipartUploadHandler", string(s3err.EncodeXMLResponse(response)), errCode) if 
errCode != s3err.ErrNone { s3err.WriteErrorResponse(w, r, errCode) @@ -121,7 +121,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht return } - glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) + log.V(1).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html s3err.WriteEmptyResponse(w, r, http.StatusNoContent) @@ -156,7 +156,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht UploadIdMarker: aws.String(uploadIDMarker), }) - glog.V(2).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode) + log.V(1).Infof("ListMultipartUploadsHandler %s errCode=%d", string(s3err.EncodeXMLResponse(response)), errCode) if errCode != s3err.ErrNone { s3err.WriteErrorResponse(w, r, errCode) @@ -201,7 +201,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re return } - glog.V(2).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part)) + log.V(1).Infof("ListObjectPartsHandler %s count=%d", string(s3err.EncodeXMLResponse(response)), len(response.Part)) writeSuccessResponseXML(w, r, response) @@ -236,7 +236,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ } defer dataReader.Close() - glog.V(2).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID) + log.V(1).Infof("PutObjectPartHandler %s %s %04d", bucket, uploadID, partID) uploadUrl := s3a.genPartUploadUrl(bucket, uploadID, partID) @@ -282,7 +282,7 @@ func (s3a *S3ApiServer) checkUploadId(object string, id string) error { hash := s3a.generateUploadID(object) if !strings.HasPrefix(id, hash) { - glog.Errorf("object %s and uploadID %s are not matched", object, id) + log.Errorf("object %s and uploadID %s are not matched", object, id) return fmt.Errorf("object %s and uploadID %s are not matched", object, id) } return nil diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go index e77d734ac..faff0edad 100644 --- a/weed/s3api/s3api_object_handlers_postpolicy.go +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -13,7 +13,7 @@ import ( "github.com/dustin/go-humanize" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/s3api/policy" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" @@ -26,7 +26,7 @@ func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.R bucket := mux.Vars(r)["bucket"] - glog.V(3).Infof("PostPolicyBucketHandler %s", bucket) + log.V(0).Infof("PostPolicyBucketHandler %s", bucket) reader, err := r.MultipartReader() if err != nil { diff --git a/weed/s3api/s3api_object_handlers_put.go b/weed/s3api/s3api_object_handlers_put.go index 716fec0aa..d8010490f 100644 --- a/weed/s3api/s3api_object_handlers_put.go +++ b/weed/s3api/s3api_object_handlers_put.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/security" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_server "github.com/seaweedfs/seaweedfs/weed/server" stats_collect "github.com/seaweedfs/seaweedfs/weed/stats" @@ -25,7 
+25,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectHandler %s %s", bucket, object) + log.V(0).Infof("PutObjectHandler %s %s", bucket, object) _, err := validateContentMd5(r.Header) if err != nil { @@ -99,7 +99,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader proxyReq, err := http.NewRequest(http.MethodPut, uploadUrl, body) if err != nil { - glog.Errorf("NewRequest %s: %v", uploadUrl, err) + log.Errorf("NewRequest %s: %v", uploadUrl, err) return "", s3err.ErrInternalError } @@ -125,7 +125,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := s3a.client.Do(proxyReq) if postErr != nil { - glog.Errorf("post to filer: %v", postErr) + log.Errorf("post to filer: %v", postErr) return "", s3err.ErrInternalError } defer resp.Body.Close() @@ -134,17 +134,17 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp_body, ra_err := io.ReadAll(resp.Body) if ra_err != nil { - glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) + log.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) return etag, s3err.ErrInternalError } var ret weed_server.FilerPostResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body)) + log.Errorf("failed to parse upload response from %s: %v", uploadUrl, string(resp_body)) return "", s3err.ErrInternalError } if ret.Error != "" { - glog.Errorf("upload to filer error: %v", ret.Error) + log.Errorf("upload to filer error: %v", ret.Error) return "", filerErrorToS3Error(ret.Error) } stats_collect.RecordBucketActiveTime(bucket) diff --git a/weed/s3api/s3api_object_handlers_tagging.go b/weed/s3api/s3api_object_handlers_tagging.go index 23ca05133..4ebfda5e0 100644 --- a/weed/s3api/s3api_object_handlers_tagging.go +++ b/weed/s3api/s3api_object_handlers_tagging.go @@ -7,7 +7,7 @@ import ( "io" "net/http" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3err" "github.com/seaweedfs/seaweedfs/weed/util" @@ -18,7 +18,7 @@ import ( func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("GetObjectTaggingHandler %s %s", bucket, object) + log.V(0).Infof("GetObjectTaggingHandler %s %s", bucket, object) target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) dir, name := target.DirAndName() @@ -26,10 +26,10 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R tags, err := s3a.getTags(dir, name) if err != nil { if err == filer_pb.ErrNotFound { - glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + log.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) } else { - glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + log.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) } return @@ -44,7 +44,7 @@ func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.R func (s3a *S3ApiServer) PutObjectTaggingHandler(w
http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("PutObjectTaggingHandler %s %s", bucket, object) + log.V(0).Infof("PutObjectTaggingHandler %s %s", bucket, object) target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) dir, name := target.DirAndName() @@ -52,29 +52,29 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R tagging := &Tagging{} input, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength)) if err != nil { - glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) + log.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) return } if err = xml.Unmarshal(input, tagging); err != nil { - glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err) + log.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrMalformedXML) return } tags := tagging.ToTags() err = ValidateTags(tags) if err != nil { - glog.Errorf("PutObjectTaggingHandler ValidateTags error %s: %v", r.URL, err) + log.Errorf("PutObjectTaggingHandler ValidateTags error %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag) return } if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil { if err == filer_pb.ErrNotFound { - glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + log.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) } else { - glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + log.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) } return @@ -89,7 +89,7 @@ func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.R func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { bucket, object := s3_constants.GetBucketAndObject(r) - glog.V(3).Infof("DeleteObjectTaggingHandler %s %s", bucket, object) + log.V(0).Infof("DeleteObjectTaggingHandler %s %s", bucket, object) target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) dir, name := target.DirAndName() @@ -97,10 +97,10 @@ func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *htt err := s3a.rmTags(dir, name) if err != nil { if err == filer_pb.ErrNotFound { - glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + log.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrNoSuchKey) } else { - glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + log.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) s3err.WriteErrorResponse(w, r, s3err.ErrInternalError) } return diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 2f9e9e3fb..b22cb51ec 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -9,7 +9,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb" "github.com/seaweedfs/seaweedfs/weed/util/grace" @@ -80,9 +80,9 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer if option.Config != "" { grace.OnReload(func() { if err := s3ApiServer.iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { - glog.Errorf("fail to load config file 
%s: %v", option.Config, err) + log.Errorf("fail to load config file %s: %v", option.Config, err) } else { - glog.V(0).Infof("Loaded %d identities from config file %s", len(s3ApiServer.iam.identities), option.Config) + log.V(3).Infof("Loaded %d identities from config file %s", len(s3ApiServer.iam.identities), option.Config) } }) } diff --git a/weed/s3api/s3err/audit_fluent.go b/weed/s3api/s3err/audit_fluent.go index ef2459eac..5e7e2487c 100644 --- a/weed/s3api/s3err/audit_fluent.go +++ b/weed/s3api/s3err/audit_fluent.go @@ -8,8 +8,8 @@ import ( "time" "github.com/fluent/fluent-logger-golang/fluent" - "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type AccessLogExtend struct { @@ -57,12 +57,12 @@ var ( func InitAuditLog(config string) { configContent, readErr := os.ReadFile(config) if readErr != nil { - glog.Errorf("fail to read fluent config %s : %v", config, readErr) + log.Errorf("fail to read fluent config %s : %v", config, readErr) return } fluentConfig := &fluent.Config{} if err := json.Unmarshal(configContent, fluentConfig); err != nil { - glog.Errorf("fail to parse fluent config %s : %v", string(configContent), err) + log.Errorf("fail to parse fluent config %s : %v", string(configContent), err) return } if len(fluentConfig.TagPrefix) == 0 && len(environment) > 0 { @@ -71,13 +71,13 @@ func InitAuditLog(config string) { fluentConfig.Async = true fluentConfig.AsyncResultCallback = func(data []byte, err error) { if err != nil { - glog.Warning("Error while posting log: ", err) + log.Warningf("Error while posting log: %v", err) } } var err error Logger, err = fluent.New(*fluentConfig) if err != nil { - glog.Errorf("fail to load fluent config: %v", err) + log.Errorf("fail to load fluent config: %v", err) } } @@ -170,15 +170,15 @@ func PostLog(r *http.Request, HTTPStatusCode int, errorCode ErrorCode) { return } if err := Logger.Post(tag, *GetAccessLog(r, HTTPStatusCode, errorCode)); err != nil { - glog.Warning("Error while posting log: ", err) + log.Warningf("Error while posting log: %v", err) } } -func PostAccessLog(log AccessLog) { - if Logger == nil || len(log.Key) == 0 { +func PostAccessLog(accessLog AccessLog) { + if Logger == nil || len(accessLog.Key) == 0 { return } - if err := Logger.Post(tag, log); err != nil { - glog.Warning("Error while posting log: ", err) + if err := Logger.Post(tag, accessLog); err != nil { + log.Warningf("Error while posting log: %v", err) } } diff --git a/weed/s3api/s3err/error_handler.go b/weed/s3api/s3err/error_handler.go index 910dab12a..14fdb8819 100644 --- a/weed/s3api/s3err/error_handler.go +++ b/weed/s3api/s3err/error_handler.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/gorilla/mux" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "net/http" "strconv" "strings" @@ -93,10 +93,10 @@ func WriteResponse(w http.ResponseWriter, r *http.Request, statusCode int, respo } w.WriteHeader(statusCode) if response != nil { - glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) + log.V(-1).Infof("status %d %s: %s", statusCode, mType, string(response)) _, err := w.Write(response) if err != nil { - glog.V(0).Infof("write err: %v", err) + log.V(3).Infof("write err: %v", err) } w.(http.Flusher).Flush() } @@ -104,6 +104,6 @@ func WriteResponse(w http.ResponseWriter, r *http.Request, statusCode int, respo // If none of the http routes match respond 
with MethodNotAllowed func NotFoundHandler(w http.ResponseWriter, r *http.Request) { - glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) + log.V(3).Infof("unsupported %s %s", r.Method, r.RequestURI) WriteErrorResponse(w, r, ErrMethodNotAllowed) } diff --git a/weed/security/guard.go b/weed/security/guard.go index f92b10044..a04c403c9 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -3,7 +3,7 @@ package security import ( "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "net" "net/http" "strings" @@ -112,7 +112,7 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error { } } - glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr) + log.V(3).Infof("Not in whitelist: %s", r.RemoteAddr) return fmt.Errorf("Not in whitelist: %s", r.RemoteAddr) } @@ -123,7 +123,7 @@ func (g *Guard) UpdateWhiteList(whiteList []string) { if strings.Contains(ip, "/") { _, cidrnet, err := net.ParseCIDR(ip) if err != nil { - glog.Errorf("Parse CIDR %s in whitelist failed: %v", ip, err) + log.Errorf("Parse CIDR %s in whitelist failed: %v", ip, err) } whiteListCIDR[ip] = cidrnet } else { diff --git a/weed/security/jwt.go b/weed/security/jwt.go index d859e9ea8..a1ab45d52 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -7,7 +7,7 @@ import ( "time" jwt "github.com/golang-jwt/jwt/v5" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type EncodedJwt string @@ -42,7 +42,7 @@ func GenJwtForVolumeServer(signingKey SigningKey, expiresAfterSec int, fileId st t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) encoded, e := t.SignedString([]byte(signingKey)) if e != nil { - glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e) + log.V(3).Infof("Failed to sign claims %+v: %v", t.Claims, e) return "" } return EncodedJwt(encoded) @@ -64,7 +64,7 @@ func GenJwtForFilerServer(signingKey SigningKey, expiresAfterSec int) EncodedJwt t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) encoded, e := t.SignedString([]byte(signingKey)) if e != nil { - glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e) + log.V(3).Infof("Failed to sign claims %+v: %v", t.Claims, e) return "" } return EncodedJwt(encoded) diff --git a/weed/security/tls.go b/weed/security/tls.go index 1a9dfacb5..cf17849ef 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -40,7 +40,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption serverIdentityProvider, err := pemfile.NewProvider(serverOptions) if err != nil { - glog.Warningf("pemfile.NewProvider(%v) %v failed: %v", serverOptions, component, err) + log.Warningf("pemfile.NewProvider(%v) %v failed: %v", serverOptions, component, err) return nil, nil } @@ -50,7 +50,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption } serverRootProvider, err := pemfile.NewProvider(serverRootOptions) if err != nil { - glog.Warningf("pemfile.NewProvider(%v) failed: %v", serverRootOptions, err) + log.Warningf("pemfile.NewProvider(%v) failed: %v", serverRootOptions, err) return nil, nil } @@ -67,17 +67,17 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption } options.MinTLSVersion, err = 
TlsVersionByName(config.GetString("tls.min_version")) if err != nil { - glog.Warningf("tls min version parse failed, %v", err) + log.Warningf("tls min version parse failed, %v", err) return nil, nil } options.MaxTLSVersion, err = TlsVersionByName(config.GetString("tls.max_version")) if err != nil { - glog.Warningf("tls max version parse failed, %v", err) + log.Warningf("tls max version parse failed, %v", err) return nil, nil } options.CipherSuites, err = TlsCipherSuiteByNames(config.GetString("tls.cipher_suites")) if err != nil { - glog.Warningf("tls cipher suite parse failed, %v", err) + log.Warningf("tls cipher suite parse failed, %v", err) return nil, nil } allowedCommonNames := config.GetString(component + ".allowed_commonNames") @@ -99,7 +99,7 @@ func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption } ta, err := advancedtls.NewServerCreds(options) if err != nil { - glog.Warningf("advancedtls.NewServerCreds(%v) failed: %v", options, err) + log.Warningf("advancedtls.NewServerCreds(%v) failed: %v", options, err) return nil, nil } return grpc.Creds(ta), nil @@ -122,7 +122,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { } clientProvider, err := pemfile.NewProvider(clientOptions) if err != nil { - glog.Warningf("pemfile.NewProvider(%v) failed %v", clientOptions, err) + log.Warningf("pemfile.NewProvider(%v) failed %v", clientOptions, err) return grpc.WithTransportCredentials(insecure.NewCredentials()) } clientRootOptions := pemfile.Options{ @@ -131,7 +131,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { } clientRootProvider, err := pemfile.NewProvider(clientRootOptions) if err != nil { - glog.Warningf("pemfile.NewProvider(%v) failed: %v", clientRootOptions, err) + log.Warningf("pemfile.NewProvider(%v) failed: %v", clientRootOptions, err) return grpc.WithTransportCredentials(insecure.NewCredentials()) } options := &advancedtls.Options{ @@ -148,7 +148,7 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { } ta, err := advancedtls.NewClientCreds(options) if err != nil { - glog.Warningf("advancedtls.NewClientCreds(%v) failed: %v", options, err) + log.Warningf("advancedtls.NewClientCreds(%v) failed: %v", options, err) return grpc.WithTransportCredentials(insecure.NewCredentials()) } return grpc.WithTransportCredentials(ta) @@ -157,12 +157,12 @@ func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { func LoadClientTLSHTTP(clientCertFile string) *tls.Config { clientCerts, err := os.ReadFile(clientCertFile) if err != nil { - glog.Fatal(err) + log.Fatal(err) } certPool := x509.NewCertPool() ok := certPool.AppendCertsFromPEM(clientCerts) if !ok { - glog.Fatalf("Error processing client certificate in %s\n", clientCertFile) + log.Fatalf("Error processing client certificate in %s\n", clientCertFile) } return &tls.Config{ @@ -179,7 +179,7 @@ func (a Authenticator) Authenticate(params *advancedtls.HandshakeVerificationInf return &advancedtls.PostHandshakeVerificationResults{}, nil } err := fmt.Errorf("Authenticate: invalid subject client common name: %s", params.Leaf.Subject.CommonName) - glog.Error(err) + log.Error(err) return nil, err } diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go index 05694f681..f27fecb36 100644 --- a/weed/sequence/snowflake_sequencer.go +++ b/weed/sequence/snowflake_sequencer.go @@ -5,7 +5,7 @@ import ( "hash/fnv" "github.com/bwmarrin/snowflake" - "github.com/seaweedfs/seaweedfs/weed/glog" 
+ "github.com/seaweedfs/seaweedfs/weed/util/log" ) // a simple snowflake Sequencer @@ -18,7 +18,7 @@ func NewSnowflakeSequencer(nodeid string, snowflakeId int) (*SnowflakeSequencer, if snowflakeId != 0 { nodeid_hash = uint32(snowflakeId) } - glog.V(0).Infof("use snowflake seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash) + log.V(3).Infof("use snowflake seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash) node, err := snowflake.NewNode(int64(nodeid_hash)) if err != nil { fmt.Println(err) diff --git a/weed/server/common.go b/weed/server/common.go index 5dad9d81b..652f903a3 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -22,7 +22,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -74,7 +74,7 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter } if httpStatus >= 400 { - glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s", + log.V(3).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s", r.Method, r.URL.String(), httpStatus, string(bytes)) } @@ -110,19 +110,19 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON status %s %d: %v", r.URL, httpStatus, err) - glog.V(1).Infof("JSON content: %+v", obj) + log.V(3).Infof("error writing JSON status %s %d: %v", r.URL, httpStatus, err) + log.V(2).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { m := make(map[string]interface{}) m["error"] = err.Error() - glog.V(1).Infof("error JSON response status %d: %s", httpStatus, m["error"]) + log.V(2).Infof("error JSON response status %d: %s", httpStatus, m["error"]) writeJsonQuiet(w, r, httpStatus, m) } func debug(params ...interface{}) { - glog.V(4).Infoln(params...) + log.V(-1).Infoln(params...) 
} func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) { @@ -302,13 +302,13 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) writeFn, err := prepareWriteFn(0, totalSize) if err != nil { - glog.Errorf("ProcessRangeRequest: %v", err) + log.Errorf("ProcessRangeRequest: %v", err) w.Header().Del("Content-Length") http.Error(w, err.Error(), http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest: %v", err) } if err = writeFn(bufferedWriter); err != nil { - glog.Errorf("ProcessRangeRequest: %v", err) + log.Errorf("ProcessRangeRequest: %v", err) w.Header().Del("Content-Length") http.Error(w, err.Error(), http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest: %v", err) @@ -320,7 +320,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 //mostly copy from src/pkg/net/http/fs.go ranges, err := parseRange(rangeReq, totalSize) if err != nil { - glog.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err) + log.Errorf("ProcessRangeRequest headers: %+v err: %v", w.Header(), err) http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) return fmt.Errorf("ProcessRangeRequest header: %v", err) } @@ -352,7 +352,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 writeFn, err := prepareWriteFn(ra.start, ra.length) if err != nil { - glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) + log.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) w.Header().Del("Content-Length") http.Error(w, err.Error(), http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest: %v", err) @@ -360,7 +360,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 w.WriteHeader(http.StatusPartialContent) err = writeFn(bufferedWriter) if err != nil { - glog.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) + log.Errorf("ProcessRangeRequest range[0]: %+v err: %v", w.Header(), err) w.Header().Del("Content-Length") http.Error(w, err.Error(), http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest range[0]: %v", err) @@ -378,7 +378,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 } writeFn, err := prepareWriteFn(ra.start, ra.length) if err != nil { - glog.Errorf("ProcessRangeRequest range[%d] err: %v", i, err) + log.Errorf("ProcessRangeRequest range[%d] err: %v", i, err) http.Error(w, "Internal Error", http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest range[%d] err: %v", i, err) } @@ -415,7 +415,7 @@ func ProcessRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64 } w.WriteHeader(http.StatusPartialContent) if _, err := io.CopyN(bufferedWriter, sendContent, sendSize); err != nil { - glog.Errorf("ProcessRangeRequest err: %v", err) + log.Errorf("ProcessRangeRequest err: %v", err) http.Error(w, "Internal Error", http.StatusInternalServerError) return fmt.Errorf("ProcessRangeRequest err: %v", err) } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index b1440c94f..cf4bd0396 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -11,7 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/cluster" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + 
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" @@ -21,14 +21,14 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) + log.V(-1).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name)) if err == filer_pb.ErrNotFound { return &filer_pb.LookupDirectoryEntryResponse{}, err } if err != nil { - glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) + log.V(0).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } @@ -39,7 +39,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) { - glog.V(4).Infof("ListEntries %v", req) + log.V(-1).Infof("ListEntries %v", req) limit := int(req.Limit) if limit == 0 { @@ -97,7 +97,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol for _, vidString := range req.VolumeIds { vid, err := strconv.Atoi(vidString) if err != nil { - glog.V(1).Infof("Unknown volume id %d", vid) + log.V(2).Infof("Unknown volume id %d", vid) return nil, err } var locs []*filer_pb.Location @@ -138,7 +138,7 @@ func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err err func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name) + log.V(-1).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name) resp = &filer_pb.CreateEntryResponse{} @@ -160,7 +160,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr if createErr == nil { fs.filer.DeleteChunksNotRecursive(garbage) } else { - glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + log.V(0).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) resp.Error = createErr.Error() } @@ -169,7 +169,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - glog.V(4).Infof("UpdateEntry %v", req) + log.V(-1).Infof("UpdateEntry %v", req) fullpath := util.Join(req.Directory, req.Entry.Name) entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) @@ -195,7 +195,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures) } else { - glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) + log.V(0).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } return &filer_pb.UpdateEntryResponse{}, err @@ -230,7 +230,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks) if err != nil { // not good, but should be ok - 
glog.V(0).Infof("MaybeManifestize: %v", err) + log.V(3).Infof("MaybeManifestize: %v", err) } } @@ -241,7 +241,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) { - glog.V(4).Infof("AppendToEntry %v", req) + log.V(-1).Infof("AppendToEntry %v", req) fullpath := util.NewFullPath(req.Directory, req.EntryName) lockClient := cluster.NewLockClient(fs.grpcDialOption, fs.option.Host) @@ -273,13 +273,13 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo entry.Chunks = append(entry.GetChunks(), req.Chunks...) so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "") if err != nil { - glog.Warningf("detectStorageOption: %v", err) + log.Warningf("detectStorageOption: %v", err) return &filer_pb.AppendToEntryResponse{}, err } entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.GetChunks()) if err != nil { // not good, but should be ok - glog.V(0).Infof("MaybeManifestize: %v", err) + log.V(3).Infof("MaybeManifestize: %v", err) } err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil, false, fs.filer.MaxFilenameLength) @@ -289,7 +289,7 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - glog.V(4).Infof("DeleteEntry %v", req) + log.V(-1).Infof("DeleteEntry %v", req) err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures, req.IfNotModifiedAfter) resp = &filer_pb.DeleteEntryResponse{} @@ -307,7 +307,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol so, err := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack, req.DataNode) if err != nil { - glog.V(3).Infof("AssignVolume: %v", err) + log.V(0).Infof("AssignVolume: %v", err) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } @@ -315,11 +315,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) if err != nil { - glog.V(3).Infof("AssignVolume: %v", err) + log.V(0).Infof("AssignVolume: %v", err) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + log.V(0).Infof("AssignVolume error: %v", assignResult.Error) return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } @@ -339,7 +339,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) { - glog.V(4).Infof("CollectionList %v", req) + log.V(-1).Infof("CollectionList %v", req) resp = &filer_pb.CollectionListResponse{} err = fs.filer.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { @@ -361,7 +361,7 @@ func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.Collect func (fs 
*FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - glog.V(4).Infof("DeleteCollection %v", req) + log.V(-1).Infof("DeleteCollection %v", req) err = fs.filer.DoDeleteCollection(req.GetCollection()) diff --git a/weed/server/filer_grpc_server_admin.go b/weed/server/filer_grpc_server_admin.go index 8b4912258..e9c2c3dde 100644 --- a/weed/server/filer_grpc_server_admin.go +++ b/weed/server/filer_grpc_server_admin.go @@ -6,7 +6,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" @@ -100,7 +100,7 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. MinorVersion: util.MINOR_VERSION, } - glog.V(4).Infof("GetFilerConfiguration: %v", t) + log.V(-1).Infof("GetFilerConfiguration: %v", t) return t, nil } diff --git a/weed/server/filer_grpc_server_dlm.go b/weed/server/filer_grpc_server_dlm.go index 5ec147835..3396a63ad 100644 --- a/weed/server/filer_grpc_server_dlm.go +++ b/weed/server/filer_grpc_server_dlm.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/seaweedfs/seaweedfs/weed/cluster/lock_manager" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "google.golang.org/grpc/codes" @@ -20,7 +20,7 @@ func (fs *FilerServer) DistributedLock(ctx context.Context, req *filer_pb.LockRe var movedTo pb.ServerAddress expiredAtNs := time.Now().Add(time.Duration(req.SecondsToLock) * time.Second).UnixNano() resp.LockOwner, resp.RenewToken, movedTo, err = fs.filer.Dlm.LockWithTimeout(req.Name, expiredAtNs, req.RenewToken, req.Owner) - glog.V(3).Infof("lock %s %v %v %v, isMoved=%v %v", req.Name, req.SecondsToLock, req.RenewToken, req.Owner, req.IsMoved, movedTo) + log.V(0).Infof("lock %s %v %v %v, isMoved=%v %v", req.Name, req.SecondsToLock, req.RenewToken, req.Owner, req.IsMoved, movedTo) if movedTo != "" && movedTo != fs.option.Host && !req.IsMoved { err = pb.WithFilerClient(false, 0, movedTo, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { secondResp, err := client.DistributedLock(context.Background(), &filer_pb.LockRequest{ @@ -100,7 +100,7 @@ func (fs *FilerServer) FindLockOwner(ctx context.Context, req *filer_pb.FindLock } if owner == "" { - glog.V(0).Infof("find lock %s moved to %v: %v", req.Name, movedTo, err) + log.V(3).Infof("find lock %s moved to %v: %v", req.Name, movedTo, err) return nil, status.Error(codes.NotFound, fmt.Sprintf("lock %s not found", req.Name)) } if err != nil { @@ -145,7 +145,7 @@ func (fs *FilerServer) OnDlmChangeSnapshot(snapshot []pb.ServerAddress) { return err }); err != nil { // it may not be worth retrying, since the lock may have expired - glog.Errorf("transfer lock %v to %v: %v", lock.Key, server, err) + log.Errorf("transfer lock %v to %v: %v", lock.Key, server, err) } } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index db00dd496..e45c66620 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -7,14 +7,14 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + 
"github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { - glog.V(1).Infof("AtomicRenameEntry %v", req) + log.V(2).Infof("AtomicRenameEntry %v", req) oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) @@ -50,7 +50,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom func (fs *FilerServer) StreamRenameEntry(req *filer_pb.StreamRenameEntryRequest, stream filer_pb.SeaweedFiler_StreamRenameEntryServer) (err error) { - glog.V(1).Infof("StreamRenameEntry %v", req) + log.V(2).Infof("StreamRenameEntry %v", req) oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) @@ -122,7 +122,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, stream filer_pb currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) - glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath) + log.V(2).Infof("moving folder %s => %s", currentDirPath, newDirPath) lastFileName := "" includeLastFile := false @@ -154,10 +154,10 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, stream filer_pb.Seawee oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) - glog.V(1).Infof("moving entry %s => %s", oldPath, newPath) + log.V(2).Infof("moving entry %s => %s", oldPath, newPath) if oldPath == newPath { - glog.V(1).Infof("skip moving entry %s => %s", oldPath, newPath) + log.V(2).Infof("skip moving entry %s => %s", oldPath, newPath) return nil } diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go index dfe594b46..583fd06c6 100644 --- a/weed/server/filer_grpc_server_sub_meta.go +++ b/weed/server/filer_grpc_server_sub_meta.go @@ -12,7 +12,7 @@ import ( "google.golang.org/protobuf/proto" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/seaweedfs/seaweedfs/weed/util/log_buffer" @@ -36,13 +36,13 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, return fmt.Errorf("duplicated subscription detected for client %s id %d", clientName, req.ClientId) } defer func() { - glog.V(0).Infof("disconnect %v subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) + log.V(3).Infof("disconnect %v subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) fs.deleteClient("", clientName, req.ClientId, req.ClientEpoch) fs.filer.MetaAggregator.ListenersCond.Broadcast() // nudges the subscribers that are waiting }() lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2) - glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + log.V(3).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName) @@ -55,7 +55,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, for { - glog.V(4).Infof("read on disk %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + 
log.V(-1).Infof("read on disk %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn) if readPersistedLogErr != nil { @@ -65,7 +65,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, return nil } - glog.V(4).Infof("processed to %v: %v", clientName, processedTsNs) + log.V(-1).Infof("processed to %v: %v", clientName, processedTsNs) if processedTsNs != 0 { lastReadTime = log_buffer.NewMessagePosition(processedTsNs, -2) } else { @@ -80,7 +80,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, } } - glog.V(4).Infof("read in memory %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + log.V(-1).Infof("read in memory %v aggregated subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) lastReadTime, isDone, readInMemoryLogErr = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData("aggMeta:"+clientName, lastReadTime, req.UntilNs, func() bool { // Check if the client has disconnected by monitoring the context @@ -99,7 +99,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, if errors.Is(readInMemoryLogErr, log_buffer.ResumeFromDiskError) { continue } - glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr) + log.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr) if !errors.Is(readInMemoryLogErr, log_buffer.ResumeError) { break } @@ -108,7 +108,7 @@ func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, return nil } if !fs.hasClient(req.ClientId, req.ClientEpoch) { - glog.V(0).Infof("client %v is closed", clientName) + log.V(3).Infof("client %v is closed", clientName) return nil } @@ -134,13 +134,13 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq return fmt.Errorf("duplicated local subscription detected for client %s clientId:%d", clientName, req.ClientId) } defer func() { - glog.V(0).Infof("disconnect %v local subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) + log.V(3).Infof("disconnect %v local subscriber %s clientId:%d", clientName, req.PathPrefix, req.ClientId) fs.deleteClient("local", clientName, req.ClientId, req.ClientEpoch) fs.listenersCond.Broadcast() // nudges the subscribers that are waiting }() lastReadTime := log_buffer.NewMessagePosition(req.SinceNs, -2) - glog.V(0).Infof(" + %v local subscribe %s from %+v clientId:%d", clientName, req.PathPrefix, lastReadTime, req.ClientId) + log.V(3).Infof(" + %v local subscribe %s from %+v clientId:%d", clientName, req.PathPrefix, lastReadTime, req.ClientId) eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName) @@ -153,10 +153,10 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq for { // println("reading from persisted logs ...") - glog.V(0).Infof("read on disk %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + log.V(3).Infof("read on disk %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) processedTsNs, isDone, readPersistedLogErr = fs.filer.ReadPersistedLogBuffer(lastReadTime, req.UntilNs, eachLogEntryFn) if readPersistedLogErr != nil { - glog.V(0).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, req.PathPrefix, lastReadTime, readPersistedLogErr) + log.V(3).Infof("read on disk %v local subscribe %s from %+v: %v", clientName, 
req.PathPrefix, lastReadTime, readPersistedLogErr) return fmt.Errorf("reading from persisted logs: %v", readPersistedLogErr) } if isDone { @@ -172,7 +172,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq } } - glog.V(0).Infof("read in memory %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + log.V(3).Infof("read in memory %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) lastReadTime, isDone, readInMemoryLogErr = fs.filer.LocalMetaLogBuffer.LoopProcessLogData("localMeta:"+clientName, lastReadTime, req.UntilNs, func() bool { @@ -197,7 +197,7 @@ func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataReq if readInMemoryLogErr == log_buffer.ResumeFromDiskError { continue } - glog.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr) + log.Errorf("processed to %v: %v", lastReadTime, readInMemoryLogErr) if readInMemoryLogErr != log_buffer.ResumeError { break } @@ -218,7 +218,7 @@ func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotificati return func(logEntry *filer_pb.LogEntry) (bool, error) { event := &filer_pb.SubscribeMetadataResponse{} if err := proto.Unmarshal(logEntry.Data, event); err != nil { - glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + log.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) return false, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) } @@ -301,7 +301,7 @@ func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRe } // println("sending", dirPath, entryName) if err := stream.Send(message); err != nil { - glog.V(0).Infof("=> client %v: %+v", clientName, err) + log.V(3).Infof("=> client %v: %+v", clientName, err) return err } filtered = 0 @@ -329,7 +329,7 @@ func matchByDirectory(dirPath string, directories []string) bool { func (fs *FilerServer) addClient(prefix string, clientType string, clientAddress string, clientId int32, clientEpoch int32) (isReplacing, alreadyKnown bool, clientName string) { clientName = clientType + "@" + clientAddress - glog.V(0).Infof("+ %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) + log.V(3).Infof("+ %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) if clientId != 0 { fs.knownListenersLock.Lock() defer fs.knownListenersLock.Unlock() @@ -345,7 +345,7 @@ func (fs *FilerServer) addClient(prefix string, clientType string, clientAddress } func (fs *FilerServer) deleteClient(prefix string, clientName string, clientId int32, clientEpoch int32) { - glog.V(0).Infof("- %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) + log.V(3).Infof("- %v listener %v clientId %v clientEpoch %v", prefix, clientName, clientId, clientEpoch) if clientId != 0 { fs.knownListenersLock.Lock() defer fs.knownListenersLock.Unlock() diff --git a/weed/server/filer_grpc_server_traverse_meta.go b/weed/server/filer_grpc_server_traverse_meta.go index 4a924f065..b393fda5c 100644 --- a/weed/server/filer_grpc_server_traverse_meta.go +++ b/weed/server/filer_grpc_server_traverse_meta.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" "github.com/viant/ptrie" @@ -12,7 +12,7 @@ import ( func (fs *FilerServer) 
TraverseBfsMetadata(req *filer_pb.TraverseBfsMetadataRequest, stream filer_pb.SeaweedFiler_TraverseBfsMetadataServer) error { - glog.V(0).Infof("TraverseBfsMetadata %v", req) + log.V(3).Infof("TraverseBfsMetadata %v", req) excludedTrie := ptrie.New[bool]() for _, excluded := range req.ExcludedPrefixes { diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 090c795fa..5fad64736 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -42,7 +42,7 @@ import ( _ "github.com/seaweedfs/seaweedfs/weed/filer/sqlite" _ "github.com/seaweedfs/seaweedfs/weed/filer/tarantool" _ "github.com/seaweedfs/seaweedfs/weed/filer/ydb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/notification" _ "github.com/seaweedfs/seaweedfs/weed/notification/aws_sqs" _ "github.com/seaweedfs/seaweedfs/weed/notification/gocdk_pub_sub" @@ -143,7 +143,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) option.Masters.RefreshBySrvIfAvailable() if len(option.Masters.GetInstances()) == 0 { - glog.Fatal("master list is required!") + log.Fatal("master list is required!") } if !util.LoadConfiguration("filer", false) { @@ -153,15 +153,15 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) if os.IsNotExist(err) { os.MkdirAll(option.DefaultLevelDbDir, 0755) } - glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir) + log.V(3).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir) } else { - glog.Warningf("skipping default store dir in %s", option.DefaultLevelDbDir) + log.Warningf("skipping default store dir in %s", option.DefaultLevelDbDir) } util.LoadConfiguration("notification", false) v.SetDefault("filer.options.max_file_name_length", 255) maxFilenameLength := v.GetUint32("filer.options.max_file_name_length") - glog.V(0).Infof("max_file_name_length %d", maxFilenameLength) + log.V(3).Infof("max_file_name_length %d", maxFilenameLength) fs.filer = filer.NewFiler(*option.Masters, fs.grpcDialOption, option.Host, option.FilerGroup, option.Collection, option.DefaultReplication, option.DataCenter, maxFilenameLength, func() { if atomic.LoadInt64(&fs.listenersWaits) > 0 { fs.listenersCond.Broadcast() @@ -201,9 +201,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) existingNodes := fs.filer.ListExistingPeerUpdates(context.Background()) startFromTime := time.Now().Add(-filer.LogFlushInterval) if isFresh { - glog.V(0).Infof("%s bootstrap from peers %+v", option.Host, existingNodes) + log.V(3).Infof("%s bootstrap from peers %+v", option.Host, existingNodes) if err := fs.filer.MaybeBootstrapFromOnePeer(option.Host, existingNodes, startFromTime); err != nil { - glog.Fatalf("%s bootstrap from %+v: %v", option.Host, existingNodes, err) + log.Fatalf("%s bootstrap from %+v: %v", option.Host, existingNodes, err) } } fs.filer.AggregateFromPeers(option.Host, existingNodes, startFromTime) @@ -246,7 +246,7 @@ func (fs *FilerServer) checkWithMaster() { } func (fs *FilerServer) Reload() { - glog.V(0).Infoln("Reload filer server...") + log.V(3).Infoln("Reload filer server...") util.LoadConfiguration("security", false) v := util.GetViper() diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index 1c5c89dcf..5cf15a2a3 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -11,7 +11,7 @@ import ( "time" 
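The weed/util/log package itself does not appear in these hunks. Below is a minimal sketch, assuming a zap-backed facade, of the surface the rewritten call sites rely on: a glog-compatible V(level) gate plus package-level Warningf/Errorf/Fatal helpers. The gating rule and the default threshold here are inferred from usage in this diff, not taken from the real package:

```go
package log

import "go.uber.org/zap"

var sugar = zap.Must(zap.NewProduction()).Sugar()

// verbosity is the assumed display threshold: V(n) fires when n >= verbosity,
// so with the default of 0, levels 0..3 are shown and V(-1) debug is not.
var verbosity = 0

// Verbose gates a group of Info calls behind a level check, like glog.V.
type Verbose struct{ enabled bool }

func V(level int) Verbose { return Verbose{enabled: level >= verbosity} }

func (v Verbose) Infof(format string, args ...interface{}) {
	if v.enabled {
		sugar.Infof(format, args...)
	}
}

func (v Verbose) Infoln(args ...interface{}) {
	if v.enabled {
		sugar.Infoln(args...)
	}
}

// Package-level helpers the rewritten call sites use directly.
func Warningf(format string, args ...interface{}) { sugar.Warnf(format, args...) }
func Errorf(format string, args ...interface{})   { sugar.Errorf(format, args...) }
func Error(args ...interface{})                   { sugar.Error(args...) }
func Fatalf(format string, args ...interface{})   { sugar.Fatalf(format, args...) }
func Fatal(args ...interface{})                   { sugar.Fatal(args...) }
```

One behavioral detail worth checking against the real package: zap's SugaredLogger.Fatal logs and then calls os.Exit(1), which is what the log.Fatal call sites in this patch expect from their glog predecessors.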
"github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/util" @@ -97,7 +97,7 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { fs.inFlightDataLimitCond.L.Lock() inFlightDataSize := atomic.LoadInt64(&fs.inFlightDataSize) for fs.option.ConcurrentUploadLimit != 0 && inFlightDataSize > fs.option.ConcurrentUploadLimit { - glog.V(4).Infof("wait because inflight data %d > %d", inFlightDataSize, fs.option.ConcurrentUploadLimit) + log.V(-1).Infof("wait because inflight data %d > %d", inFlightDataSize, fs.option.ConcurrentUploadLimit) fs.inFlightDataLimitCond.Wait() inFlightDataSize = atomic.LoadInt64(&fs.inFlightDataSize) } @@ -211,17 +211,17 @@ func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool) tokenStr := security.GetJwt(r) if tokenStr == "" { - glog.V(1).Infof("missing jwt from %s", r.RemoteAddr) + log.V(2).Infof("missing jwt from %s", r.RemoteAddr) return false } token, err := security.DecodeJwt(signingKey, tokenStr, &security.SeaweedFilerClaims{}) if err != nil { - glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) + log.V(2).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) return false } if !token.Valid { - glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) + log.V(2).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) return false } else { return true @@ -231,7 +231,7 @@ func (fs *FilerServer) maybeCheckJwtAuthorization(r *http.Request, isWrite bool) func (fs *FilerServer) filerHealthzHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Server", "SeaweedFS "+util.VERSION) if _, err := fs.filer.Store.FindEntry(context.Background(), filer.TopicsDir); err != nil && err != filer_pb.ErrNotFound { - glog.Warningf("filerHealthzHandler FindEntry: %+v", err) + log.Warningf("filerHealthzHandler FindEntry: %+v", err) w.WriteHeader(http.StatusServiceUnavailable) } else { w.WriteHeader(http.StatusOK) diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go index ca445ef9a..eff17f57d 100644 --- a/weed/server/filer_server_handlers_proxy.go +++ b/weed/server/filer_server_handlers_proxy.go @@ -1,7 +1,7 @@ package weed_server import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/security" util_http "github.com/seaweedfs/seaweedfs/weed/util/http" "github.com/seaweedfs/seaweedfs/weed/util/mem" @@ -34,7 +34,7 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId) if err != nil { - glog.Errorf("locate %s: %v", fileId, err) + log.Errorf("locate %s: %v", fileId, err) w.WriteHeader(http.StatusInternalServerError) return } @@ -46,7 +46,7 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.IntN(len(urlStrings))], r.Body) if err != nil { - glog.Errorf("NewRequest %s: %v", urlStrings[0], err) + log.Errorf("NewRequest %s: %v", urlStrings[0], err) w.WriteHeader(http.StatusInternalServerError) return } @@ -63,7 +63,7 @@ func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Reques proxyResponse, postErr := 
util_http.GetGlobalHttpClient().Do(proxyReq) if postErr != nil { - glog.Errorf("post to filer: %v", postErr) + log.Errorf("post to filer: %v", postErr) w.WriteHeader(http.StatusInternalServerError) return } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 12371a8f6..acd3b6906 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -21,7 +21,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util/mem" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/images" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -103,11 +103,11 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } if err == filer_pb.ErrNotFound { - glog.V(2).Infof("Not found %s: %v", path, err) + log.V(1).Infof("Not found %s: %v", path, err) stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadNotFound).Inc() w.WriteHeader(http.StatusNotFound) } else { - glog.Errorf("Internal %s: %v", path, err) + log.Errorf("Internal %s: %v", path, err) stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadInternal).Inc() w.WriteHeader(http.StatusInternalServerError) } @@ -244,7 +244,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) defer mem.Free(data) err := filer.ReadAll(data, fs.filer.MasterClient, entry.GetChunks()) if err != nil { - glog.Errorf("failed to read %s: %v", path, err) + log.Errorf("failed to read %s: %v", path, err) w.WriteHeader(http.StatusInternalServerError) return } @@ -260,7 +260,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) _, err := writer.Write(entry.Content[offset : offset+size]) if err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorWriteEntry).Inc() - glog.Errorf("failed to write entry content: %v", err) + log.Errorf("failed to write entry content: %v", err) } return err }, nil @@ -273,7 +273,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) Name: name, }); err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadCache).Inc() - glog.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err) + log.Errorf("CacheRemoteObjectToLocalCluster %s: %v", entry.FullPath, err) return nil, fmt.Errorf("cache %s: %v", entry.FullPath, err) } else { chunks = resp.Entry.GetChunks() @@ -283,14 +283,14 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) streamFn, err := filer.PrepareStreamContentWithThrottler(fs.filer.MasterClient, fs.maybeGetVolumeReadJwtAuthorizationToken, chunks, offset, size, fs.option.DownloadMaxBytesPs) if err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() - glog.Errorf("failed to prepare stream content %s: %v", r.URL, err) + log.Errorf("failed to prepare stream content %s: %v", r.URL, err) return nil, err } return func(writer io.Writer) error { err := streamFn(writer) if err != nil { stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadStream).Inc() - glog.Errorf("failed to stream content %s: %v", r.URL, err) + log.Errorf("failed to stream content %s: %v", r.URL, err) } return err }, nil diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 56f0f9cb4..1f7aed16f 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ 
b/weed/server/filer_server_handlers_read_dir.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ui "github.com/seaweedfs/seaweedfs/weed/server/filer_ui" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/util" @@ -43,7 +43,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) if err != nil { - glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) + log.V(3).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) w.WriteHeader(http.StatusNotFound) return } @@ -58,7 +58,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque emptyFolder = false } - glog.V(4).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries)) + log.V(-1).Infof("listDirectory %s, last file %s, limit %d: %d items", path, lastFileName, limit, len(entries)) if r.Header.Get("Accept") == "application/json" { writeJsonQuiet(w, r, http.StatusOK, struct { @@ -103,7 +103,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque fs.option.ShowUIDirectoryDelete, }) if err != nil { - glog.V(0).Infof("Template Execute Error: %v", err) + log.V(3).Infof("Template Execute Error: %v", err) } } diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go index 80ea09d53..f7032837f 100644 --- a/weed/server/filer_server_handlers_tagging.go +++ b/weed/server/filer_server_handlers_tagging.go @@ -5,7 +5,7 @@ import ( "net/http" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -44,7 +44,7 @@ func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) } if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false, fs.filer.MaxFilenameLength); dbErr != nil { - glog.V(0).Infof("failing to update %s tagging : %v", path, dbErr) + log.V(3).Infof("failing to update %s tagging : %v", path, dbErr) writeJsonError(w, r, http.StatusInternalServerError, dbErr) return } @@ -110,7 +110,7 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque } if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil, false, fs.filer.MaxFilenameLength); dbErr != nil { - glog.V(0).Infof("failing to delete %s tagging : %v", path, dbErr) + log.V(3).Infof("failing to delete %s tagging : %v", path, dbErr) writeJsonError(w, r, http.StatusInternalServerError, dbErr) return } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 82880c2ac..df3b978f1 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -11,7 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -46,7 +46,7 @@ func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, u assignResult, ae := 
operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest) if ae != nil { - glog.Errorf("failing to assign a file id: %v", ae) + log.Errorf("failing to assign a file id: %v", ae) err = ae return } @@ -93,14 +93,14 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, conte if err == ErrReadOnly { w.WriteHeader(http.StatusInsufficientStorage) } else { - glog.V(1).Infoln("post", r.RequestURI, ":", err.Error()) + log.V(2).Infoln("post", r.RequestURI, ":", err.Error()) w.WriteHeader(http.StatusInternalServerError) } return } if util.FullPath(r.URL.Path).IsLongerFileName(so.MaxFileNameLength) { - glog.V(1).Infoln("post", r.RequestURI, ": ", "entry name too long") + log.V(2).Infoln("post", r.RequestURI, ": ", "entry name too long") w.WriteHeader(http.StatusRequestURITooLong) return } @@ -128,7 +128,7 @@ func (fs *FilerServer) move(ctx context.Context, w http.ResponseWriter, r *http. src := r.URL.Query().Get("mv.from") dst := r.URL.Path - glog.V(2).Infof("FilerServer.move %v to %v", src, dst) + log.V(1).Infof("FilerServer.move %v to %v", src, dst) var err error if src, err = clearName(src); err != nil { @@ -232,7 +232,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { err = fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil, 0) if err != nil && err != filer_pb.ErrNotFound { - glog.V(1).Infoln("deleting", objectPath, ":", err.Error()) + log.V(2).Infoln("deleting", objectPath, ":", err.Error()) writeJsonError(w, r, http.StatusInternalServerError, err) return } @@ -261,7 +261,7 @@ func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication if ttlSeconds == 0 { ttl, err := needle.ReadTTL(rule.GetTtl()) if err != nil { - glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) + log.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) } ttlSeconds = int32(ttl.Minutes()) * 60 } @@ -284,7 +284,7 @@ func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplicatio ttl, err := needle.ReadTTL(qTtl) if err != nil { - glog.Errorf("fail to parse ttl %s: %v", qTtl, err) + log.Errorf("fail to parse ttl %s: %v", qTtl, err) } so, err := fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack, dataNode) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index b0af7be4b..ee8cf4f52 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -14,7 +14,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants" @@ -239,7 +239,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } mode, err := strconv.ParseUint(modeStr, 8, 32) if err != nil { - glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + log.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) mode = 0660 } @@ -256,7 +256,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa if isAppend || isOffsetWrite { existingEntry, findErr := fs.filer.FindEntry(ctx, 
util.FullPath(path)) if findErr != nil && findErr != filer_pb.ErrNotFound { - glog.V(0).Infof("failing to find %s: %v", path, findErr) + log.V(3).Infof("failing to find %s: %v", path, findErr) } entry = existingEntry } @@ -279,7 +279,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa } } else { - glog.V(4).Infoln("saving", path) + log.V(-1).Infoln("saving", path) newChunks = fileChunks entry = &filer.Entry{ FullPath: util.FullPath(path), @@ -301,14 +301,14 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa // maybe concatenate small chunks into one whole chunk mergedChunks, replyerr = fs.maybeMergeChunks(so, newChunks) if replyerr != nil { - glog.V(0).Infof("merge chunks %s: %v", r.RequestURI, replyerr) + log.V(3).Infof("merge chunks %s: %v", r.RequestURI, replyerr) mergedChunks = newChunks } // maybe compact entry chunks mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks) if replyerr != nil { - glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr) + log.V(3).Infof("manifestize %s: %v", r.RequestURI, replyerr) return } entry.Chunks = mergedChunks @@ -343,7 +343,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa if dbErr != nil { replyerr = dbErr filerResult.Error = dbErr.Error() - glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) + log.V(3).Infof("failing to write %s to filer server : %v", path, dbErr) } return filerResult, replyerr } @@ -403,7 +403,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http } mode, err := strconv.ParseUint(modeStr, 8, 32) if err != nil { - glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + log.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) mode = 0660 } @@ -419,7 +419,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http return } - glog.V(4).Infoln("mkdir", path) + log.V(-1).Infoln("mkdir", path) entry := &filer.Entry{ FullPath: util.FullPath(path), Attr: filer.Attr{ @@ -439,7 +439,7 @@ func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil, false, so.MaxFileNameLength); dbErr != nil { replyerr = dbErr filerResult.Error = dbErr.Error() - glog.V(0).Infof("failing to create dir %s on filer server : %v", path, dbErr) + log.V(3).Infof("failing to create dir %s on filer server : %v", path, dbErr) } return filerResult, replyerr } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index 9c1628749..0de6cfb0f 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -9,7 +9,7 @@ import ( "time" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -25,7 +25,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter) } - glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) + log.V(-1).Infof("write %s to %v", r.URL.Path, urlLocation) // Note: encrypt(gzip(data)), encrypt data first, then gzip diff --git 
a/weed/server/filer_server_handlers_write_merge.go b/weed/server/filer_server_handlers_write_merge.go index 2110f485a..73bb06351 100644 --- a/weed/server/filer_server_handlers_write_merge.go +++ b/weed/server/filer_server_handlers_write_merge.go @@ -2,7 +2,7 @@ package weed_server import ( "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -56,7 +56,7 @@ func (fs *FilerServer) mergeChunks(so *operation.StorageOption, inputChunks []*f garbage, err := filer.MinusChunks(fs.lookupFileId, inputChunks, mergedChunks) if err != nil { - glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", + log.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", mergedChunks, inputChunks) return mergedChunks, err } diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go index e34fe27e6..0c3ef9418 100644 --- a/weed/server/filer_server_handlers_write_upload.go +++ b/weed/server/filer_server_handlers_write_upload.go @@ -13,7 +13,7 @@ import ( "slices" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/security" @@ -130,7 +130,7 @@ func (fs *FilerServer) uploadReaderToChunks(reader io.Reader, startOffset int64, fileChunksSize := len(fileChunks) + len(chunks) for _, chunk := range chunks { fileChunks = append(fileChunks, chunk) - glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, fileChunksSize, chunk.FileId, offset, offset+int64(chunk.Size)) + log.V(-1).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, fileChunksSize, chunk.FileId, offset, offset+int64(chunk.Size)) } fileChunksLock.Unlock() } @@ -148,9 +148,9 @@ func (fs *FilerServer) uploadReaderToChunks(reader io.Reader, startOffset int64, wg.Wait() if uploadErr != nil { - glog.V(0).Infof("upload file %s error: %v", fileName, uploadErr) + log.V(3).Infof("upload file %s error: %v", fileName, uploadErr) for _, chunk := range fileChunks { - glog.V(4).Infof("purging failed uploaded %s chunk %s [%d,%d)", fileName, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + log.V(-1).Infof("purging failed uploaded %s chunk %s [%d,%d)", fileName, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } fs.filer.DeleteUncommittedChunks(fileChunks) return nil, md5Hash, 0, uploadErr, nil @@ -205,14 +205,14 @@ func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, ch // assign one file id for one chunk fileId, urlLocation, auth, uploadErr = fs.assignNewFileInfo(so) if uploadErr != nil { - glog.V(4).Infof("retry later due to assign error: %v", uploadErr) + log.V(-1).Infof("retry later due to assign error: %v", uploadErr) stats.FilerHandlerCounter.WithLabelValues(stats.ChunkAssignRetry).Inc() return uploadErr } // upload the chunk to the volume server uploadResult, uploadErr, _ = fs.doUpload(urlLocation, dataReader, fileName, contentType, nil, auth) if uploadErr != nil { - glog.V(4).Infof("retry later due to upload error: %v", uploadErr) + log.V(-1).Infof("retry later due to upload error: %v", uploadErr) stats.FilerHandlerCounter.WithLabelValues(stats.ChunkDoUploadRetry).Inc() fid, _ := 
filer_pb.ToFileIdObject(fileId) fileChunk := filer_pb.FileChunk{ @@ -226,7 +226,7 @@ func (fs *FilerServer) dataToChunk(fileName, contentType string, data []byte, ch return nil }) if err != nil { - glog.Errorf("upload error: %v", err) + log.Errorf("upload error: %v", err) return failedFileChunks, err } diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index dcf279e1d..30a4dfd88 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -19,7 +19,7 @@ import ( "github.com/seaweedfs/raft" "google.golang.org/grpc/peer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/topology" @@ -39,7 +39,7 @@ func (ms *MasterServer) RegisterUuids(heartbeat *master_pb.Heartbeat) (duplicate index := sort.SearchStrings(v, id) if index < len(v) && v[index] == id { duplicated_uuids = append(duplicated_uuids, id) - glog.Errorf("directory of %s on %s has been loaded", id, k) + log.Errorf("directory of %s on %s has been loaded", id, k) } } } @@ -48,7 +48,7 @@ func (ms *MasterServer) RegisterUuids(heartbeat *master_pb.Heartbeat) (duplicate } ms.Topo.UuidMap[key] = heartbeat.LocationUuids - glog.V(0).Infof("found new uuid:%v %v , %v", key, heartbeat.LocationUuids, ms.Topo.UuidMap) + log.V(3).Infof("found new uuid:%v %v , %v", key, heartbeat.LocationUuids, ms.Topo.UuidMap) return nil, nil } @@ -57,7 +57,7 @@ func (ms *MasterServer) UnRegisterUuids(ip string, port int) { defer ms.Topo.UuidAccessLock.Unlock() key := fmt.Sprintf("%s:%d", ip, port) delete(ms.Topo.UuidMap, key) - glog.V(0).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UuidMap) + log.V(3).Infof("remove volume server %v, online volume server: %v", key, ms.Topo.UuidMap) } func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error { @@ -67,7 +67,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ if dn != nil { dn.Counter-- if dn.Counter > 0 { - glog.V(0).Infof("disconnect phantom volume server %s:%d remaining %d", dn.Ip, dn.Port, dn.Counter) + log.V(3).Infof("disconnect phantom volume server %s:%d remaining %d", dn.Ip, dn.Port, dn.Counter) return } @@ -87,7 +87,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ // if the volume server disconnects and reconnects quickly // the unregister and register can race with each other ms.Topo.UnRegisterDataNode(dn) - glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port) + log.V(3).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port) ms.UnRegisterUuids(dn.Ip, dn.Port) if ms.Topo.IsLeader() && (len(message.DeletedVids) > 0 || len(message.DeletedEcVids) > 0) { @@ -100,9 +100,9 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ heartbeat, err := stream.Recv() if err != nil { if dn != nil { - glog.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err) + log.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err) } else { - glog.Warningf("SendHeartbeat.Recv: %v", err) + log.Warningf("SendHeartbeat.Recv: %v", err) } stats.MasterReceivedHeartbeatCounter.WithLabelValues("error").Inc() return err @@ -112,16 +112,16 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ // tell the volume servers about the leader newLeader, err := 
ms.Topo.Leader() if err != nil { - glog.Warningf("SendHeartbeat find leader: %v", err) + log.Warningf("SendHeartbeat find leader: %v", err) return err } if err := stream.Send(&master_pb.HeartbeatResponse{ Leader: string(newLeader), }); err != nil { if dn != nil { - glog.Warningf("SendHeartbeat.Send response to %s:%d %v", dn.Ip, dn.Port, err) + log.Warningf("SendHeartbeat.Send response to %s:%d %v", dn.Ip, dn.Port, err) } else { - glog.Warningf("SendHeartbeat.Send response %v", err) + log.Warningf("SendHeartbeat.Send response %v", err) } return err } @@ -138,13 +138,13 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ dc := ms.Topo.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), int(heartbeat.GrpcPort), heartbeat.PublicUrl, heartbeat.MaxVolumeCounts) - glog.V(0).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUuids) + log.V(3).Infof("added volume server %d: %v:%d %v", dn.Counter, heartbeat.GetIp(), heartbeat.GetPort(), heartbeat.LocationUuids) uuidlist, err := ms.RegisterUuids(heartbeat) if err != nil { if stream_err := stream.Send(&master_pb.HeartbeatResponse{ DuplicatedUuids: uuidlist, }); stream_err != nil { - glog.Warningf("SendHeartbeat.Send DuplicatedDirectory response to %s:%d %v", dn.Ip, dn.Port, stream_err) + log.Warningf("SendHeartbeat.Send DuplicatedDirectory response to %s:%d %v", dn.Ip, dn.Port, stream_err) return stream_err } return err @@ -154,7 +154,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, Preallocate: ms.preallocateSize > 0, }); err != nil { - glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) + log.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) return err } stats.MasterReceivedHeartbeatCounter.WithLabelValues("dataNode").Inc() @@ -163,7 +163,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ dn.AdjustMaxVolumeCounts(heartbeat.MaxVolumeCounts) - glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) + log.V(-1).Infof("master received heartbeat %s", heartbeat.String()) stats.MasterReceivedHeartbeatCounter.WithLabelValues("total").Inc() message := &master_pb.VolumeLocation{ @@ -201,11 +201,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ newVolumes, deletedVolumes := ms.Topo.SyncDataNodeRegistration(heartbeat.Volumes, dn) for _, v := range newVolumes { - glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url()) + log.V(3).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url()) message.NewVids = append(message.NewVids, uint32(v.Id)) } for _, v := range deletedVolumes { - glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url()) + log.V(3).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url()) message.DeletedVids = append(message.DeletedVids, uint32(v.Id)) } } @@ -229,7 +229,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards { stats.MasterReceivedHeartbeatCounter.WithLabelValues("ecShards").Inc() - glog.V(4).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) + log.V(-1).Infof("master received ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) newShards, 
deletedShards := ms.Topo.SyncDataNodeEcShards(heartbeat.EcShards, dn) // broadcast the ec vid changes to master clients @@ -299,7 +299,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ for { _, err := stream.Recv() if err != nil { - glog.V(2).Infof("- client %v: %v", clientName, err) + log.V(1).Infof("- client %v: %v", clientName, err) go func() { // consume message chan to avoid deadlock, go routine exit when message chan is closed for range messageChan { @@ -318,7 +318,7 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ select { case message := <-messageChan: if err := stream.Send(message); err != nil { - glog.V(0).Infof("=> client %v: %+v", clientName, message) + log.V(3).Infof("=> client %v: %+v", clientName, message) return err } case <-ticker.C: @@ -342,10 +342,10 @@ func (ms *MasterServer) broadcastToClients(message *master_pb.KeepConnectedRespo for client, ch := range ms.clientChans { select { case ch <- message: - glog.V(4).Infof("send message to %s", client) + log.V(-1).Infof("send message to %s", client) default: stats.MasterBroadcastToFullErrorCounter.Inc() - glog.Errorf("broadcastToClients %s message full", client) + log.Errorf("broadcastToClients %s message full", client) } } ms.clientChansLock.RUnlock() @@ -354,7 +354,7 @@ func (ms *MasterServer) broadcastToClients(message *master_pb.KeepConnectedRespo func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error { leader, err := ms.Topo.Leader() if err != nil { - glog.Errorf("topo leader: %v", err) + log.Errorf("topo leader: %v", err) return raft.NotLeaderError } if err := stream.Send(&master_pb.KeepConnectedResponse{ @@ -369,7 +369,7 @@ func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe func (ms *MasterServer) addClient(filerGroup, clientType string, clientAddress pb.ServerAddress) (clientName string, messageChan chan *master_pb.KeepConnectedResponse) { clientName = filerGroup + "." 
+ clientType + "@" + string(clientAddress) - glog.V(0).Infof("+ client %v", clientName) + log.V(3).Infof("+ client %v", clientName) // we buffer this because otherwise we end up in a potential deadlock where // the KeepConnected loop is no longer listening on this channel but we're @@ -384,7 +384,7 @@ func (ms *MasterServer) addClient(filerGroup, clientType string, clientAddress p } func (ms *MasterServer) deleteClient(clientName string) { - glog.V(0).Infof("- client %v", clientName) + log.V(3).Infof("- client %v", clientName) ms.clientChansLock.Lock() // close message chan, so that the KeepConnected go routine can exit if clientChan, ok := ms.clientChans[clientName]; ok { @@ -398,11 +398,11 @@ func findClientAddress(ctx context.Context, grpcPort uint32) string { // fmt.Printf("FromContext %+v\n", ctx) pr, ok := peer.FromContext(ctx) if !ok { - glog.Error("failed to get peer from ctx") + log.Error("failed to get peer from ctx") return "" } if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") + log.Error("failed to get peer address") return "" } if grpcPort == 0 { diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go index d8b63b5da..1a241c2bf 100644 --- a/weed/server/master_grpc_server_admin.go +++ b/weed/server/master_grpc_server_admin.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/raft" "github.com/seaweedfs/seaweedfs/weed/cluster" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" @@ -90,7 +90,7 @@ func (locks *AdminLocks) isLocked(lockName string) (clientName string, message s if !found { return "", "", false } - glog.V(4).Infof("isLocked %v: %v", adminLock.lastClient, adminLock.lastMessage) + log.V(-1).Infof("isLocked %v: %v", adminLock.lastClient, adminLock.lastMessage) return adminLock.lastClient, adminLock.lastMessage, adminLock.accessLockTime.Add(LockDuration).After(time.Now()) } @@ -132,7 +132,7 @@ func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.Leas } if lastClient, lastMessage, isLocked := ms.adminLocks.isLocked(req.LockName); isLocked { - glog.V(4).Infof("LeaseAdminToken %v", lastClient) + log.V(-1).Infof("LeaseAdminToken %v", lastClient) if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { // for renew ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName) diff --git a/weed/server/master_grpc_server_assign.go b/weed/server/master_grpc_server_assign.go index 4820de6a2..6f050af18 100644 --- a/weed/server/master_grpc_server_assign.go +++ b/weed/server/master_grpc_server_assign.go @@ -3,7 +3,7 @@ package weed_server import ( "context" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "strings" "time" @@ -22,16 +22,16 @@ func (ms *MasterServer) StreamAssign(server master_pb.Seaweed_StreamAssignServer for { req, err := server.Recv() if err != nil { - glog.Errorf("StreamAssign failed to receive: %v", err) + log.Errorf("StreamAssign failed to receive: %v", err) return err } resp, err := ms.Assign(context.Background(), req) if err != nil { - glog.Errorf("StreamAssign failed to assign: %v", err) + log.Errorf("StreamAssign failed to assign: %v", err) return err } if err = server.Send(resp); err != nil { - 
glog.Errorf("StreamAssign failed to send: %v", err) + log.Errorf("StreamAssign failed to send: %v", err) return err } } @@ -98,7 +98,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } } if err != nil { - glog.V(1).Infof("assign %v %v: %v", req, option.String(), err) + log.V(2).Infof("assign %v %v: %v", req, option.String(), err) stats.MasterPickForWriteErrorCounter.Inc() lastErr = err if (req.DataCenter != "" || req.Rack != "") && strings.Contains(err.Error(), topology.NoWritableVolumes) { @@ -134,7 +134,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest }, nil } if lastErr != nil { - glog.V(0).Infof("assign %v %v: %v", req, option.String(), lastErr) + log.V(3).Infof("assign %v %v: %v", req, option.String(), lastErr) } return nil, lastErr } diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go index 3a383e259..7388d35b2 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -15,7 +15,7 @@ import ( "github.com/seaweedfs/raft" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -28,12 +28,12 @@ const ( ) func (ms *MasterServer) DoAutomaticVolumeGrow(req *topology.VolumeGrowRequest) { - glog.V(1).Infoln("starting automatic volume grow") + log.V(2).Infoln("starting automatic volume grow") start := time.Now() newVidLocations, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count) - glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start)) + log.V(2).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start)) if err != nil { - glog.V(1).Infof("automatic volume grow failed: %+v", err) + log.V(2).Infof("automatic volume grow failed: %+v", err) return } for _, newVidLocation := range newVidLocations { @@ -77,7 +77,7 @@ func (ms *MasterServer) ProcessGrowRequest() { _, err = ms.VolumeGrow(ctx, vgr) } if err != nil { - glog.V(0).Infof("volume grow request failed: %+v", err) + log.V(3).Infof("volume grow request failed: %+v", err) } writableVolumes := vl.CloneWritableVolumes() for dcId, racks := range dcs { @@ -92,7 +92,7 @@ func (ms *MasterServer) ProcessGrowRequest() { } if _, err = ms.VolumeGrow(ctx, vgr); err != nil { - glog.V(0).Infof("volume grow request for dc:%s rack:%s failed: %+v", dcId, rackId, err) + log.V(3).Infof("volume grow request for dc:%s rack:%s failed: %+v", dcId, rackId, err) } } } @@ -130,7 +130,7 @@ func (ms *MasterServer) ProcessGrowRequest() { // not atomic but it's okay if found || (!req.Force && !vl.ShouldGrowVolumes()) { - glog.V(4).Infoln("discard volume grow request") + log.V(-1).Infoln("discard volume grow request") time.Sleep(time.Millisecond * 211) vl.DoneGrowRequest() continue @@ -138,7 +138,7 @@ func (ms *MasterServer) ProcessGrowRequest() { filter.Store(req, nil) // we have lock called inside vg - glog.V(0).Infof("volume grow %+v", req) + log.V(3).Infof("volume grow %+v", req) go func(req *topology.VolumeGrowRequest, vl *topology.VolumeLayout) { ms.DoAutomaticVolumeGrow(req) vl.DoneGrowRequest() diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 8621708d2..8b323ecad 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -22,7 +22,7 @@ import ( "github.com/seaweedfs/raft" "google.golang.org/grpc" - 
"github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/sequence" @@ -125,11 +125,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se seq := ms.createSequencer(option) if nil == seq { - glog.Fatalf("create sequencer failed.") + log.Fatalf("create sequencer failed.") } ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, 5, replicationAsMin) ms.vg = topology.NewDefaultVolumeGrowth() - glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB") + log.V(3).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB") ms.guard = security.NewGuard(append(ms.option.WhiteList, whiteList...), signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) @@ -178,10 +178,10 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { if raftServer.raftServer != nil { ms.Topo.RaftServer = raftServer.raftServer ms.Topo.RaftServer.AddEventListener(raft.LeaderChangeEventType, func(e raft.Event) { - glog.V(0).Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value()) + log.V(3).Infof("leader change event: %+v => %+v", e.PrevValue(), e.Value()) stats.MasterLeaderChangeCounter.WithLabelValues(fmt.Sprintf("%+v", e.Value())).Inc() if ms.Topo.RaftServer.Leader() != "" { - glog.V(0).Infof("[%s] %s becomes leader.", ms.Topo.RaftServer.Name(), ms.Topo.RaftServer.Leader()) + log.V(3).Infof("[%s] %s becomes leader.", ms.Topo.RaftServer.Name(), ms.Topo.RaftServer.Leader()) ms.Topo.LastLeaderChangeTime = time.Now() } }) @@ -194,7 +194,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { ms.Topo.RaftServerAccessLock.Unlock() if ms.Topo.IsLeader() { - glog.V(0).Infof("%s I am the leader!", raftServerName) + log.V(3).Infof("%s I am the leader!", raftServerName) } else { var raftServerLeader string ms.Topo.RaftServerAccessLock.RLock() @@ -206,7 +206,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { raftServerLeader = string(raftServerLeaderAddr) } ms.Topo.RaftServerAccessLock.RUnlock() - glog.V(0).Infof("%s %s - is the leader.", raftServerName, raftServerLeader) + log.V(3).Infof("%s %s - is the leader.", raftServerName, raftServerLeader) } } @@ -233,7 +233,7 @@ func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc { } // proxy to leader - glog.V(4).Infoln("proxying to leader", raftServerLeader) + log.V(-1).Infoln("proxying to leader", raftServerLeader) proxy := httputil.NewSingleHostReverseProxy(targetUrl) director := proxy.Director proxy.Director = func(req *http.Request) { @@ -254,7 +254,7 @@ func (ms *MasterServer) startAdminScripts() { if adminScripts == "" { return } - glog.V(0).Infof("adminScripts: %v", adminScripts) + log.V(3).Infof("adminScripts: %v", adminScripts) v.SetDefault("master.maintenance.sleep_minutes", 17) sleepMinutes := v.GetInt("master.maintenance.sleep_minutes") @@ -313,12 +313,12 @@ func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEn for _, c := range shell.Commands { if c.Name() == cmd { if c.HasTag(shell.ResourceHeavy) { - glog.Warningf("%s is resource heavy and should not run on master", cmd) + log.Warningf("%s is resource heavy and should not run on master", cmd) continue } - glog.V(0).Infof("executing: %s %v", cmd, args) + log.V(3).Infof("executing: %s %v", cmd, args) if err := c.Do(args, commandEnv, os.Stdout); err != nil { - 
glog.V(0).Infof("error: %v", err) + log.V(3).Infof("error: %v", err) } } } @@ -328,14 +328,14 @@ func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer var seq sequence.Sequencer v := util.GetViper() seqType := strings.ToLower(v.GetString(SequencerType)) - glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType) + log.V(2).Infof("[%s] : [%s]", SequencerType, seqType) switch strings.ToLower(seqType) { case "snowflake": var err error snowflakeId := v.GetInt(SequencerSnowflakeId) seq, err = sequence.NewSnowflakeSequencer(string(option.Master), snowflakeId) if err != nil { - glog.Error(err) + log.Error(err) seq = nil } case "raft": @@ -353,7 +353,7 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF if update.NodeType != cluster.MasterType || ms.Topo.HashicorpRaft == nil { return } - glog.V(4).Infof("OnPeerUpdate: %+v", update) + log.V(-1).Infof("OnPeerUpdate: %+v", update) peerAddress := pb.ServerAddress(update.Address) peerName := string(peerAddress) @@ -368,7 +368,7 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF } } if !raftServerFound { - glog.V(0).Infof("adding new raft server: %s", peerName) + log.V(3).Infof("adding new raft server: %s", peerName) ms.Topo.HashicorpRaft.AddVoter( hashicorpRaft.ServerID(peerName), hashicorpRaft.ServerAddress(peerAddress.ToGrpcAddress()), 0, 0) @@ -378,7 +378,7 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF ctx, cancel := context.WithTimeout(context.TODO(), 15*time.Second) defer cancel() if _, err := client.Ping(ctx, &master_pb.PingRequest{Target: string(peerAddress), TargetType: cluster.MasterType}); err != nil { - glog.V(0).Infof("master %s didn't respond to pings. remove raft server", peerName) + log.V(3).Infof("master %s didn't respond to pings. 
remove raft server", peerName) if err := ms.MasterClient.WithClient(false, func(client master_pb.SeaweedClient) error { _, err := client.RaftRemoveServer(context.Background(), &master_pb.RaftRemoveServerRequest{ Id: peerName, @@ -386,11 +386,11 @@ func (ms *MasterServer) OnPeerUpdate(update *master_pb.ClusterNodeUpdate, startF }) return err }); err != nil { - glog.Warningf("failed removing old raft server: %v", err) + log.Warningf("failed removing old raft server: %v", err) return err } } else { - glog.V(0).Infof("master %s successfully responded to ping", peerName) + log.V(3).Infof("master %s successfully responded to ping", peerName) } return nil }) @@ -408,7 +408,7 @@ func (ms *MasterServer) Shutdown() { } func (ms *MasterServer) Reload() { - glog.V(0).Infoln("Reload master server...") + log.V(3).Infoln("Reload master server...") util.LoadConfiguration("security", false) v := util.GetViper() diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index f49b04e8c..a6d0e3ff5 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/security" @@ -143,7 +143,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) for time.Now().Sub(startTime) < maxTimeout { fid, count, dnList, shouldGrow, err := ms.Topo.PickForWrite(requestedCount, option, vl) if shouldGrow && !vl.HasGrowRequest() { - glog.V(0).Infof("dirAssign volume growth %v from %v", option.String(), r.RemoteAddr) + log.V(3).Infof("dirAssign volume growth %v from %v", option.String(), r.RemoteAddr) if err != nil && ms.Topo.AvailableSpaceFor(option) <= 0 { err = fmt.Errorf("%s and no free volumes left for %s", err.Error(), option.String()) } diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index fb0503e33..6c3806ce2 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend/memory_map" @@ -61,12 +61,12 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque var err error gcThreshold, err = strconv.ParseFloat(gcString, 32) if err != nil { - glog.V(0).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err) + log.V(3).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err) writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("garbageThreshold %s is not a valid float number", gcString)) return } } - // glog.Infoln("garbageThreshold =", gcThreshold) + // log.Infoln("garbageThreshold =", gcThreshold) ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.option.MaxParallelVacuumPerServer, 0, "", ms.preallocateSize, false) ms.dirStatusHandler(w, r) } @@ -78,7 +78,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request writeJsonError(w, r, http.StatusNotAcceptable, err) return } - glog.V(0).Infof("volumeGrowHandler received %v from %v", option.String(), r.RemoteAddr) 
+ log.V(3).Infof("volumeGrowHandler received %v from %v", option.String(), r.RemoteAddr) if count, err = strconv.ParseUint(r.FormValue("count"), 10, 32); err == nil { replicaCount := int64(count * uint64(option.ReplicaPlacement.GetCopyCount())) diff --git a/weed/server/raft_hashicorp.go b/weed/server/raft_hashicorp.go index 0c6a72d6f..42fb20867 100644 --- a/weed/server/raft_hashicorp.go +++ b/weed/server/raft_hashicorp.go @@ -18,9 +18,9 @@ import ( "github.com/armon/go-metrics/prometheus" "github.com/hashicorp/raft" boltdb "github.com/hashicorp/raft-boltdb/v2" - "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/stats" + "github.com/seaweedfs/seaweedfs/weed/util/log" "google.golang.org/grpc" ) @@ -76,7 +76,7 @@ func (s *RaftServer) monitorLeaderLoop(updatePeers bool) { } else { s.topo.BarrierReset() } - glog.V(0).Infof("is leader %+v change event: %+v => %+v", isLeader, prevLeader, leader) + log.V(3).Infof("is leader %+v change event: %+v => %+v", isLeader, prevLeader, leader) prevLeader = leader s.topo.LastLeaderChangeTime = time.Now() } @@ -97,18 +97,18 @@ func (s *RaftServer) updatePeers() { if peerName == peerLeader || existsPeerName[peerName] { continue } - glog.V(0).Infof("adding new peer: %s", peerName) + log.V(3).Infof("adding new peer: %s", peerName) s.RaftHashicorp.AddVoter( raft.ServerID(peerName), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0) } for peer := range existsPeerName { if _, found := s.peers[peer]; !found { - glog.V(0).Infof("removing old peer: %s", peer) + log.V(3).Infof("removing old peer: %s", peer) s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0) } } if _, found := s.peers[peerLeader]; !found { - glog.V(0).Infof("removing old leader peer: %s", peerLeader) + log.V(3).Infof("removing old leader peer: %s", peerLeader) s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0) } } @@ -128,13 +128,13 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) { if c.LeaderLeaseTimeout > c.HeartbeatTimeout { c.LeaderLeaseTimeout = c.HeartbeatTimeout } - if glog.V(4) { + if log.V(-1).Info != nil { c.LogLevel = "Debug" - } else if glog.V(2) { + } else if log.V(1).Info != nil { c.LogLevel = "Info" - } else if glog.V(1) { + } else if log.V(2).Info != nil { c.LogLevel = "Warn" - } else if glog.V(0) { + } else if log.V(3).Info != nil { c.LogLevel = "Error" } @@ -181,7 +181,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) { // Need to get lock, in case all servers do this at the same time. 
peerIdx := getPeerIdx(s.serverAddr, s.peers) timeSleep := time.Duration(float64(c.LeaderLeaseTimeout) * (rand.Float64()*0.25 + 1) * float64(peerIdx)) - glog.V(0).Infof("Bootstrapping idx: %d sleep: %v new cluster: %+v", peerIdx, timeSleep, cfg) + log.V(3).Infof("Bootstrapping idx: %d sleep: %v new cluster: %+v", peerIdx, timeSleep, cfg) time.Sleep(timeSleep) f := s.RaftHashicorp.BootstrapCluster(cfg) if err := f.Error(); err != nil { @@ -194,17 +194,17 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) { go s.monitorLeaderLoop(updatePeers) ticker := time.NewTicker(c.HeartbeatTimeout * 10) - if glog.V(4) { + if log.V(-1) { go func() { for { select { case <-ticker.C: cfuture := s.RaftHashicorp.GetConfiguration() if err = cfuture.Error(); err != nil { - glog.Fatalf("error getting config: %s", err) + log.Fatalf("error getting config: %s", err) } configuration := cfuture.Configuration() - glog.V(4).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers) + log.V(-1).Infof("Showing peers known by %s:\n%+v", s.RaftHashicorp.String(), configuration.Servers) } } }() diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 4d2209dc0..382a5cd6a 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -17,8 +17,8 @@ import ( hashicorpRaft "github.com/hashicorp/raft" "github.com/seaweedfs/raft" - "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/topology" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type RaftServerOption struct { @@ -55,7 +55,7 @@ func (s StateMachine) Save() ([]byte, error) { state := topology.MaxVolumeIdCommand{ MaxVolumeId: s.topo.GetMaxVolumeId(), } - glog.V(1).Infof("Save raft state %+v", state) + log.V(2).Infof("Save raft state %+v", state) return json.Marshal(state) } @@ -65,7 +65,7 @@ func (s StateMachine) Recovery(data []byte) error { if err != nil { return err } - glog.V(1).Infof("Recovery raft state %+v", state) + log.V(2).Infof("Recovery raft state %+v", state) s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId) return nil } @@ -79,7 +79,7 @@ func (s *StateMachine) Apply(l *hashicorpRaft.Log) interface{} { } s.topo.UpAdjustMaxVolumeId(state.MaxVolumeId) - glog.V(1).Infoln("max volume id", before, "==>", s.topo.GetMaxVolumeId()) + log.V(2).Infoln("max volume id", before, "==>", s.topo.GetMaxVolumeId()) return nil } @@ -108,7 +108,7 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) { topo: option.Topo, } - if glog.V(4) { + if log.V(-1) { raft.SetLogLevel(2) } @@ -116,7 +116,7 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) { var err error transporter := raft.NewGrpcTransporter(option.GrpcDialOption) - glog.V(0).Infof("Starting RaftServer with %v", option.ServerAddr) + log.V(3).Infof("Starting RaftServer with %v", option.ServerAddr) // always clear previous log to avoid server is promotable os.RemoveAll(path.Join(s.dataDir, "log")) @@ -132,7 +132,7 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) { stateMachine := StateMachine{topo: option.Topo} s.raftServer, err = raft.NewServer(string(s.serverAddr), s.dataDir, transporter, stateMachine, option.Topo, s.serverAddr.ToGrpcAddress()) if err != nil { - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return nil, err } heartbeatInterval := time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1)) @@ -155,17 +155,17 @@ func NewRaftServer(option *RaftServerOption) (*RaftServer, error) { for existsPeerName =
range s.raftServer.Peers() { if existingPeer, found := s.peers[existsPeerName]; !found { if err := s.raftServer.RemovePeer(existsPeerName); err != nil { - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return nil, err } else { - glog.V(0).Infof("removing old peer: %s", existingPeer) + log.V(3).Infof("removing old peer: %s", existingPeer) } } } s.GrpcServer = raft.NewGrpcServer(s.raftServer) - glog.V(0).Infof("current cluster leader: %v", s.raftServer.Leader()) + log.V(3).Infof("current cluster leader: %v", s.raftServer.Leader()) return s, nil } @@ -187,13 +187,13 @@ func (s *RaftServer) Peers() (members []string) { func (s *RaftServer) DoJoinCommand() { - glog.V(0).Infoln("Initializing new cluster") + log.V(3).Infoln("Initializing new cluster") if _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), ConnectionString: s.serverAddr.ToGrpcAddress(), }); err != nil { - glog.Errorf("fail to send join command: %v", err) + log.Errorf("fail to send join command: %v", err) } } diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go index eba61569e..d815e117e 100644 --- a/weed/server/raft_server_handlers.go +++ b/weed/server/raft_server_handlers.go @@ -2,7 +2,7 @@ package weed_server import ( "github.com/cenkalti/backoff/v4" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "net/http" @@ -42,7 +42,7 @@ func (s *RaftServer) HealthzHandler(w http.ResponseWriter, r *http.Request) { expBackoff.MaxElapsedTime = 5 * time.Second isLocked, err := backoff.RetryWithData(s.topo.IsChildLocked, expBackoff) if err != nil { - glog.Errorf("HealthzHandler: %+v", err) + log.Errorf("HealthzHandler: %+v", err) } if isLocked { w.WriteHeader(http.StatusLocked) diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index 2c5a538e7..57cafff18 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -29,9 +29,9 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server err := vs.store.DeleteCollection(req.Collection) if err != nil { - glog.Errorf("delete collection %s: %v", req.Collection, err) + log.Errorf("delete collection %s: %v", req.Collection, err) } else { - glog.V(2).Infof("delete collection %v", req) + log.V(1).Infof("delete collection %v", req) } return resp, err @@ -55,9 +55,9 @@ func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_p ) if err != nil { - glog.Errorf("assign volume %v: %v", req, err) + log.Errorf("assign volume %v: %v", req, err) } else { - glog.V(2).Infof("assign volume %v", req) + log.V(1).Infof("assign volume %v", req) } return resp, err @@ -71,9 +71,9 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("volume mount %v: %v", req, err) + log.Errorf("volume mount %v: %v", req, err) } else { - glog.V(2).Infof("volume mount %v", req) + log.V(1).Infof("volume mount %v", req) } return resp, err @@ -87,9 +87,9 @@ func (vs 
*VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("volume unmount %v: %v", req, err) + log.Errorf("volume unmount %v: %v", req, err) } else { - glog.V(2).Infof("volume unmount %v", req) + log.V(1).Infof("volume unmount %v", req) } return resp, err @@ -103,9 +103,9 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId), req.OnlyEmpty) if err != nil { - glog.Errorf("volume delete %v: %v", req, err) + log.Errorf("volume delete %v: %v", req, err) } else { - glog.V(2).Infof("volume delete %v", req) + log.V(1).Infof("volume delete %v", req) } return resp, err @@ -124,21 +124,21 @@ func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_ // unmount if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { - glog.Errorf("volume configure unmount %v: %v", req, err) + log.Errorf("volume configure unmount %v: %v", req, err) resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) return resp, nil } // modify the volume info file if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { - glog.Errorf("volume configure %v: %v", req, err) + log.Errorf("volume configure %v: %v", req, err) resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) return resp, nil } // mount if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { - glog.Errorf("volume configure mount %v: %v", req, err) + log.Errorf("volume configure mount %v: %v", req, err) resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) return resp, nil } @@ -167,9 +167,9 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId), req.GetPersist()) if err != nil { - glog.Errorf("volume mark readonly %v: %v", req, err) + log.Errorf("volume mark readonly %v: %v", req, err) } else { - glog.V(2).Infof("volume mark readonly %v", req) + log.V(1).Infof("volume mark readonly %v", req) } // step 3: tell master from redirecting traffic here again, to prevent rare case 1.5 @@ -197,7 +197,7 @@ func (vs *VolumeServer) notifyMasterVolumeReadonly(v *storage.Volume, isReadOnly } return nil }); grpcErr != nil { - glog.V(0).Infof("connect to %s: %v", vs.GetMaster(context.Background()), grpcErr) + log.V(3).Infof("connect to %s: %v", vs.GetMaster(context.Background()), grpcErr) return fmt.Errorf("grpc VolumeMarkReadonly with master %s: %v", vs.GetMaster(context.Background()), grpcErr) } return nil @@ -215,9 +215,9 @@ func (vs *VolumeServer) VolumeMarkWritable(ctx context.Context, req *volume_serv err := vs.store.MarkVolumeWritable(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("volume mark writable %v: %v", req, err) + log.Errorf("volume mark writable %v: %v", req, err) } else { - glog.V(2).Infof("volume mark writable %v", req) + log.V(1).Infof("volume mark writable %v", req) } // enable master to redirect traffic here diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 2f9bc5965..1cba905b4 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -16,7 +16,7 @@ import ( "golang.org/x/net/context" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" 
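The hunks in this file show the whole remapping convention at a glance: old glog V(0) becomes log.V(3), V(1) and V(2) trade places, V(3) becomes V(0), and glog's V(4) debug tier becomes log.V(-1). The patch bakes this in statically with sed, but written out as a function the mapping would look like this (a hypothetical helper, for illustration only; nothing in the tree runs it):

package main

import "fmt"

// mapGlogLevel translates an old glog verbosity into the level scheme this
// patch adopts. Hypothetical: the migration applies the mapping via sed.
func mapGlogLevel(old int) int {
	switch old {
	case 0:
		return 3 // routine operational messages stay visible by default
	case 1:
		return 2
	case 2:
		return 1
	case 3:
		return 0
	default:
		return -1 // glog V(4) and above become the deepest debug level
	}
}

func main() {
	for _, old := range []int{0, 1, 2, 3, 4} {
		fmt.Printf("glog.V(%d) -> log.V(%d)\n", old, mapGlogLevel(old))
	}
}

Running it prints the same substitution table that swap_log_levels.sh applies to the source files.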
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -40,7 +40,7 @@ func (vs *VolumeServer) checkWithMaster() (err error) { if err == nil { return } else { - glog.V(0).Infof("checkWithMaster %s: %v", master, err) + log.V(3).Infof("checkWithMaster %s: %v", master, err) } } time.Sleep(1790 * time.Millisecond) @@ -49,7 +49,7 @@ func (vs *VolumeServer) checkWithMaster() (err error) { func (vs *VolumeServer) heartbeat() { - glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes) + log.V(3).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes) vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) @@ -68,7 +68,7 @@ func (vs *VolumeServer) heartbeat() { vs.store.MasterAddress = master newLeader, err = vs.doHeartbeat(master, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { - glog.V(0).Infof("heartbeat to %s error: %v", master, err) + log.V(3).Infof("heartbeat to %s error: %v", master, err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) newLeader = "" vs.store.MasterAddress = "" @@ -103,10 +103,10 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti client := master_pb.NewSeaweedClient(grpcConnection) stream, err := client.SendHeartbeat(ctx) if err != nil { - glog.V(0).Infof("SendHeartbeat to %s: %v", masterAddress, err) + log.V(3).Infof("SendHeartbeat to %s: %v", masterAddress, err) return "", err } - glog.V(0).Infof("Heartbeat to: %v", masterAddress) + log.V(3).Infof("Heartbeat to: %v", masterAddress) vs.currentMaster = masterAddress doneChan := make(chan error, 1) @@ -127,7 +127,7 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti } } } - glog.Errorf("Shut down Volume Server due to duplicate volume directories: %v", duplicateDir) + log.Errorf("Shut down Volume Server due to duplicate volume directories: %v", duplicateDir) os.Exit(1) } volumeOptsChanged := false @@ -142,13 +142,13 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti if volumeOptsChanged { if vs.store.MaybeAdjustVolumeMax() { if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", vs.currentMaster, err) + log.V(3).Infof("Volume Server Failed to talk with master %s: %v", vs.currentMaster, err) return } } } if in.GetLeader() != "" && string(vs.currentMaster) != in.GetLeader() { - glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), vs.currentMaster) + log.V(3).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), vs.currentMaster) newLeader = pb.ServerAddress(in.GetLeader()) doneChan <- nil return @@ -157,12 +157,12 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti }() if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) return "", err } if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) return "", err } @@ -186,9 +186,9 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti &volumeMessage, }, } - 
glog.V(0).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) + log.V(3).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) if err = stream.Send(deltaBeat); err != nil { - glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) return "", err } case ecShardMessage := <-vs.store.NewEcShardsChan: @@ -201,10 +201,10 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti &ecShardMessage, }, } - glog.V(0).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + log.V(3).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) if err = stream.Send(deltaBeat); err != nil { - glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) return "", err } case volumeMessage := <-vs.store.DeletedVolumesChan: @@ -217,9 +217,9 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti &volumeMessage, }, } - glog.V(0).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) + log.V(3).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) if err = stream.Send(deltaBeat); err != nil { - glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) return "", err } case ecShardMessage := <-vs.store.DeletedEcShardsChan: @@ -232,23 +232,23 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti &ecShardMessage, }, } - glog.V(0).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + log.V(3).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) if err = stream.Send(deltaBeat); err != nil { - glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) return "", err } case <-volumeTickChan.C: - glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port) + log.V(-1).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port) vs.store.MaybeAdjustVolumeMax() if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) return "", err } case <-ecShardTickChan.C: - glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port) + log.V(-1).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port) if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to talk with master %s: %v", masterAddress, err) return "", err } case err = <-doneChan: @@ -265,9 +265,9 @@ func (vs *VolumeServer) doHeartbeat(masterAddress pb.ServerAddress, grpcDialOpti Volumes: volumeMessages, 
HasNoVolumes: len(volumeMessages) == 0, } - glog.V(1).Infof("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port) + log.V(2).Infof("volume server %s:%d stops and deletes all volumes", vs.store.Ip, vs.store.Port) if err = stream.Send(emptyBeat); err != nil { - glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) + log.V(3).Infof("Volume Server Failed to update to master %s: %v", masterAddress, err) return "", err } return diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index d34a74f55..62d65dbfa 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -11,7 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" @@ -30,14 +30,14 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) if v != nil { - glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId) + log.V(3).Infof("volume %d already exists. deleted before copying...", req.VolumeId) err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId), false) if err != nil { return fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err) } - glog.V(0).Infof("deleted existing volume %d before copying.", req.VolumeId) + log.V(3).Infof("deleted existing volume %d before copying.", req.VolumeId) } // the master will not start compaction for read-only volumes, so it is safe to just copy files directly @@ -96,7 +96,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre } return nil }); grpcErr != nil { - glog.V(0).Infof("connect to %s: %v", vs.GetMaster(context.Background()), grpcErr) + log.V(3).Infof("connect to %s: %v", vs.GetMaster(context.Background()), grpcErr) } if preallocateSize > 0 && !hasRemoteDatFile { @@ -192,7 +192,7 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre if err = stream.Send(&volume_server_pb.VolumeCopyResponse{ LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second), }); err != nil { - glog.Errorf("send response: %v", err) + log.Errorf("send response: %v", err) } return err @@ -257,7 +257,7 @@ func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse } func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool, progressFn storage.ProgressFunc) (modifiedTsNs int64, err error) { - glog.V(4).Infof("writing to %s", fileName) + log.V(-1).Infof("writing to %s", fileName) flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC if isAppend { flags = os.O_WRONLY | os.O_CREATE diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 642e8cce3..538fe757d 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" @@ -38,7 +38,7 @@ Steps to apply erasure 
coding to .dat .idx files // VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { - glog.V(0).Infof("VolumeEcShardsGenerate: %v", req) + log.V(3).Infof("VolumeEcShardsGenerate: %v", req) v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) if v == nil { @@ -96,7 +96,7 @@ func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_ // VolumeEcShardsRebuild generates the any of the missing .ec00 ~ .ec13 files func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { - glog.V(0).Infof("VolumeEcShardsRebuild: %v", req) + log.V(3).Infof("VolumeEcShardsRebuild: %v", req) baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) @@ -138,7 +138,7 @@ func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_s // VolumeEcShardsCopy copy the .ecx and some ec data slices func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) { - glog.V(0).Infof("VolumeEcShardsCopy: %v", req) + log.V(3).Infof("VolumeEcShardsCopy: %v", req) var location *storage.DiskLocation if req.CopyEcxFile { @@ -205,11 +205,11 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se bName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) - glog.V(0).Infof("ec volume %s shard delete %v", bName, req.ShardIds) + log.V(3).Infof("ec volume %s shard delete %v", bName, req.ShardIds) for _, location := range vs.store.Locations { if err := deleteEcShardIdsForEachLocation(bName, location, req.ShardIds); err != nil { - glog.Errorf("deleteEcShards from %s %s.%v: %v", location.Directory, bName, req.ShardIds, err) + log.Errorf("deleteEcShards from %s %s.%v: %v", location.Directory, bName, req.ShardIds, err) return nil, err } } @@ -289,15 +289,15 @@ func checkEcVolumeStatus(bName string, location *storage.DiskLocation) (hasEcxFi func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) { - glog.V(0).Infof("VolumeEcShardsMount: %v", req) + log.V(3).Infof("VolumeEcShardsMount: %v", req) for _, shardId := range req.ShardIds { err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) if err != nil { - glog.Errorf("ec shard mount %v: %v", req, err) + log.Errorf("ec shard mount %v: %v", req, err) } else { - glog.V(2).Infof("ec shard mount %v", req) + log.V(1).Infof("ec shard mount %v", req) } if err != nil { @@ -310,15 +310,15 @@ func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_ser func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) { - glog.V(0).Infof("VolumeEcShardsUnmount: %v", req) + log.V(3).Infof("VolumeEcShardsUnmount: %v", req) for _, shardId := range req.ShardIds { err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) if err != nil { - glog.Errorf("ec shard unmount %v: %v", req, err) + log.Errorf("ec shard unmount %v: %v", req, err) } else { - 
glog.V(2).Infof("ec shard unmount %v", req) + log.V(1).Infof("ec shard unmount %v", req) } if err != nil { @@ -399,7 +399,7 @@ func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardRea func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) { - glog.V(0).Infof("VolumeEcBlobDelete: %v", req) + log.V(3).Infof("VolumeEcBlobDelete: %v", req) resp := &volume_server_pb.VolumeEcBlobDeleteResponse{} @@ -429,7 +429,7 @@ func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_serv // VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec01 ~ .ec14 files func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) { - glog.V(0).Infof("VolumeEcShardsToVolume: %v", req) + log.V(3).Infof("VolumeEcShardsToVolume: %v", req) // collect .ec00 ~ .ec09 files shardFileNames := make([]string, erasure_coding.DataShardsCount) diff --git a/weed/server/volume_grpc_query.go b/weed/server/volume_grpc_query.go index a1abcb8eb..eb2cd83a0 100644 --- a/weed/server/volume_grpc_query.go +++ b/weed/server/volume_grpc_query.go @@ -1,7 +1,7 @@ package weed_server import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/query/json" @@ -15,7 +15,7 @@ func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_ vid, id_cookie, err := operation.ParseFileId(fid) if err != nil { - glog.V(0).Infof("volume query failed to parse fid %s: %v", fid, err) + log.V(3).Infof("volume query failed to parse fid %s: %v", fid, err) return err } @@ -25,12 +25,12 @@ func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_ cookie := n.Cookie if _, err := vs.store.ReadVolumeNeedle(volumeId, n, nil, nil); err != nil { - glog.V(0).Infof("volume query failed to read fid %s: %v", fid, err) + log.V(3).Infof("volume query failed to read fid %s: %v", fid, err) return err } if n.Cookie != cookie { - glog.V(0).Infof("volume query failed to read fid cookie %s: %v", fid, err) + log.V(3).Infof("volume query failed to read fid cookie %s: %v", fid, err) return err } diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go index b44d7d248..b2c11820d 100644 --- a/weed/server/volume_grpc_tail.go +++ b/weed/server/volume_grpc_tail.go @@ -6,7 +6,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/pb" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage" @@ -21,7 +21,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR return fmt.Errorf("not found volume id %d", req.VolumeId) } - defer glog.V(1).Infof("tailing volume %d finished", v.Id) + defer log.V(2).Infof("tailing volume %d finished", v.Id) lastTimestampNs := req.SinceNs drainingSeconds := req.IdleTimeoutSeconds @@ -29,7 +29,7 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR for { lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs) if err != nil { - glog.Infof("sendNeedlesSince: %v", err) + 
log.Infof("sendNeedlesSince: %v", err) return fmt.Errorf("streamFollow: %v", err) } time.Sleep(2 * time.Second) @@ -43,11 +43,11 @@ func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderR if drainingSeconds <= 0 { return nil } - glog.V(1).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds) + log.V(2).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds) } else { lastTimestampNs = lastProcessedTimestampNs drainingSeconds = req.IdleTimeoutSeconds - glog.V(1).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds) + log.V(2).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds) } } @@ -88,7 +88,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv return resp, fmt.Errorf("receiver not found volume id %d", req.VolumeId) } - defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) + defer log.V(2).Infof("receive tailing volume %d finished", v.Id) return resp, operation.TailVolumeFromSource(pb.ServerAddress(req.SourceVolumeServer), vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { _, err := vs.store.WriteVolumeNeedle(v.Id, n, false, false) diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index 990611052..ad3f92671 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -10,7 +10,7 @@ import ( "runtime" "github.com/prometheus/procfs" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) @@ -26,7 +26,7 @@ func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_serve resp.GarbageRatio = garbageRatio if err != nil { - glog.V(3).Infof("check volume %d: %v", req.VolumeId, err) + log.V(0).Infof("check volume %d: %v", req.VolumeId, err) } return resp, err @@ -62,15 +62,15 @@ func (vs *VolumeServer) VacuumVolumeCompact(req *volume_server_pb.VacuumVolumeCo stats.VolumeServerVacuumingCompactCounter.WithLabelValues(strconv.FormatBool(err == nil && sendErr == nil)).Inc() if err != nil { - glog.Errorf("failed compact volume %d: %v", req.VolumeId, err) + log.Errorf("failed compact volume %d: %v", req.VolumeId, err) return err } if sendErr != nil { - glog.Errorf("failed compact volume %d report progress: %v", req.VolumeId, sendErr) + log.Errorf("failed compact volume %d report progress: %v", req.VolumeId, sendErr) return sendErr } - glog.V(1).Infof("compact volume %d", req.VolumeId) + log.V(2).Infof("compact volume %d", req.VolumeId) return nil } @@ -86,9 +86,9 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv readOnly, volumeSize, err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("failed commit volume %d: %v", req.VolumeId, err) + log.Errorf("failed commit volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("commit volume %d", req.VolumeId) + log.V(2).Infof("commit volume %d", req.VolumeId) } stats.VolumeServerVacuumingCommitCounter.WithLabelValues(strconv.FormatBool(err == nil)).Inc() resp.IsReadOnly = readOnly @@ -104,9 +104,9 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser err := vs.store.CommitCleanupVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("failed cleanup volume 
%d: %v", req.VolumeId, err) + log.Errorf("failed cleanup volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("cleanup volume %d", req.VolumeId) + log.V(2).Infof("cleanup volume %d", req.VolumeId) } return resp, err diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index a3f072eb7..04bbd90a0 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -14,7 +14,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/storage" ) @@ -140,23 +140,23 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, } func (vs *VolumeServer) SetStopping() { - glog.V(0).Infoln("Stopping volume server...") + log.V(3).Infoln("Stopping volume server...") vs.store.SetStopping() } func (vs *VolumeServer) LoadNewVolumes() { - glog.V(0).Infoln(" Loading new volume ids ...") + log.V(3).Infoln(" Loading new volume ids ...") vs.store.LoadNewVolumes() } func (vs *VolumeServer) Shutdown() { - glog.V(0).Infoln("Shutting down volume server...") + log.V(3).Infoln("Shutting down volume server...") vs.store.Close() - glog.V(0).Infoln("Shut down successfully!") + log.V(3).Infoln("Shut down successfully!") } func (vs *VolumeServer) Reload() { - glog.V(0).Infoln("Reload volume server...") + log.V(3).Infoln("Reload volume server...") util.LoadConfiguration("security", false) v := util.GetViper() diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 22ef0e1c8..df08cde88 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -10,7 +10,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/stats" ) @@ -56,12 +56,12 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque for vs.concurrentDownloadLimit != 0 && inFlightDownloadSize > vs.concurrentDownloadLimit { select { case <-r.Context().Done(): - glog.V(4).Infof("request cancelled from %s: %v", r.RemoteAddr, r.Context().Err()) + log.V(-1).Infof("request cancelled from %s: %v", r.RemoteAddr, r.Context().Err()) w.WriteHeader(util.HttpStatusCancelled) vs.inFlightDownloadDataLimitCond.L.Unlock() return default: - glog.V(4).Infof("wait because inflight download data %d > %d", inFlightDownloadSize, vs.concurrentDownloadLimit) + log.V(-1).Infof("wait because inflight download data %d > %d", inFlightDownloadSize, vs.concurrentDownloadLimit) vs.inFlightDownloadDataLimitCond.Wait() } inFlightDownloadSize = atomic.LoadInt64(&vs.inFlightDownloadDataSize) @@ -83,11 +83,11 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque if startTime.Add(vs.inflightUploadDataTimeout).Before(time.Now()) { vs.inFlightUploadDataLimitCond.L.Unlock() err := fmt.Errorf("reject because inflight upload data %d > %d, and wait timeout", inFlightUploadDataSize, vs.concurrentUploadLimit) - glog.V(1).Infof("too many requests: %v", err) + log.V(2).Infof("too many requests: %v", err) writeJsonError(w, r, http.StatusTooManyRequests, err) return } - glog.V(4).Infof("wait because inflight upload data %d > %d", inFlightUploadDataSize, vs.concurrentUploadLimit) + log.V(-1).Infof("wait because inflight upload data %d > %d", 
inFlightUploadDataSize, vs.concurrentUploadLimit) vs.inFlightUploadDataLimitCond.Wait() inFlightUploadDataSize = atomic.LoadInt64(&vs.inFlightUploadDataSize) } @@ -149,7 +149,7 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req vs.inFlightDownloadDataLimitCond.L.Lock() inFlightDownloadSize := atomic.LoadInt64(&vs.inFlightDownloadDataSize) for vs.concurrentDownloadLimit != 0 && inFlightDownloadSize > vs.concurrentDownloadLimit { - glog.V(4).Infof("wait because inflight download data %d > %d", inFlightDownloadSize, vs.concurrentDownloadLimit) + log.V(-1).Infof("wait because inflight download data %d > %d", inFlightDownloadSize, vs.concurrentDownloadLimit) vs.inFlightDownloadDataLimitCond.Wait() inFlightDownloadSize = atomic.LoadInt64(&vs.inFlightDownloadDataSize) } @@ -182,17 +182,17 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str tokenStr := security.GetJwt(r) if tokenStr == "" { - glog.V(1).Infof("missing jwt from %s", r.RemoteAddr) + log.V(2).Infof("missing jwt from %s", r.RemoteAddr) return false } token, err := security.DecodeJwt(signingKey, tokenStr, &security.SeaweedFileIdClaims{}) if err != nil { - glog.V(1).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) + log.V(2).Infof("jwt verification error from %s: %v", r.RemoteAddr, err) return false } if !token.Valid { - glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) + log.V(2).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) return false } @@ -202,6 +202,6 @@ func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid str } return sc.Fid == vid+","+fid } - glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) + log.V(2).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) return false } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 15d639f49..bee9f757e 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -20,7 +20,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util/mem" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/images" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -53,30 +53,30 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) volumeId, err := needle.NewVolumeId(vid) if err != nil { - glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err) + log.V(1).Infof("parsing vid %s: %v", r.URL.Path, err) w.WriteHeader(http.StatusBadRequest) return } err = n.ParsePath(fid) if err != nil { - glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err) + log.V(1).Infof("parsing fid %s: %v", r.URL.Path, err) w.WriteHeader(http.StatusBadRequest) return } - // glog.V(4).Infoln("volume", volumeId, "reading", n) + // log.V(-1).Infoln("volume", volumeId, "reading", n) hasVolume := vs.store.HasVolume(volumeId) _, hasEcVolume := vs.store.FindEcVolume(volumeId) if !hasVolume && !hasEcVolume { if vs.ReadMode == "local" { - glog.V(0).Infoln("volume is not local:", err, r.URL.Path) + log.V(3).Infoln("volume is not local:", err, r.URL.Path) NotFound(w) return } lookupResult, err := operation.LookupVolumeId(vs.GetMaster, vs.grpcDialOption, volumeId.String()) - glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err) + log.V(1).Infoln("volume", volumeId, "found on", 
lookupResult, "error", err) if err != nil || len(lookupResult.Locations) <= 0 { - glog.V(0).Infoln("lookup error:", err, r.URL.Path) + log.V(3).Infoln("lookup error:", err, r.URL.Path) NotFound(w) return } @@ -88,7 +88,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) r.URL.Scheme = u.Scheme request, err := http.NewRequest(http.MethodGet, r.URL.String(), nil) if err != nil { - glog.V(0).Infof("failed to instance http request of url %s: %v", r.URL.String(), err) + log.V(3).Infof("failed to instance http request of url %s: %v", r.URL.String(), err) InternalError(w) return } @@ -100,7 +100,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) response, err := util_http.GetGlobalHttpClient().Do(request) if err != nil { - glog.V(0).Infof("request remote url %s: %v", r.URL.String(), err) + log.V(3).Infof("request remote url %s: %v", r.URL.String(), err) InternalError(w) return } @@ -156,12 +156,12 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) }() if err != nil && err != storage.ErrorDeleted && hasVolume { - glog.V(4).Infof("read needle: %v", err) + log.V(-1).Infof("read needle: %v", err) // start to fix it from other replicas, if not deleted and hasVolume and is not a replicated request } - // glog.V(4).Infoln("read bytes", count, "error", err) + // log.V(-1).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { - glog.V(3).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) + log.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) if err == storage.ErrorNotFound || err == storage.ErrorDeleted { NotFound(w) } else { @@ -170,7 +170,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } if n.Cookie != cookie { - glog.V(0).Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent()) + log.V(3).Infof("request %s with cookie:%x expected:%x from %s agent %s", r.URL.Path, cookie, n.Cookie, r.RemoteAddr, r.UserAgent()) NotFound(w) return } @@ -195,7 +195,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) pairMap := make(map[string]string) err = json.Unmarshal(n.Pairs, &pairMap) if err != nil { - glog.V(0).Infoln("Unmarshal pairs error:", err) + log.V(3).Infoln("Unmarshal pairs error:", err) } for k, v := range pairMap { w.Header().Set(k, v) @@ -225,7 +225,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) _, _, _, _, shouldCrop := shouldCropImages(ext, r) if shouldResize || shouldCrop { if n.Data, err = util.DecompressData(n.Data); err != nil { - glog.V(0).Infoln("ungzip error:", err, r.URL.Path) + log.V(3).Infoln("ungzip error:", err, r.URL.Path) } // } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) { // w.Header().Set("Content-Encoding", "zstd") @@ -233,7 +233,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.Header().Set("Content-Encoding", "gzip") } else { if n.Data, err = util.DecompressData(n.Data); err != nil { - glog.V(0).Infoln("uncompress error:", err, r.URL.Path) + log.V(3).Infoln("uncompress error:", err, r.URL.Path) } } } @@ -242,7 +242,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) rs := conditionallyCropImages(bytes.NewReader(n.Data), ext, r) rs = conditionallyResizeImages(rs, ext, r) if e := writeResponseContent(filename, mtype, 
rs, w, r); e != nil { - glog.V(2).Infoln("response write error:", e) + log.V(1).Infoln("response write error:", e) } } else { vs.streamWriteResponseContent(filename, mtype, volumeId, n, w, r, readOption) @@ -274,7 +274,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed()) if e != nil { - glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e) + log.V(3).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e) return false } if fileName == "" && chunkManifest.Name != "" { @@ -302,7 +302,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, rs = conditionallyResizeImages(rs, ext, r) if e := writeResponseContent(fileName, mType, rs, w, r); e != nil { - glog.V(2).Infoln("response write error:", e) + log.V(1).Infoln("response write error:", e) } return true } @@ -343,7 +343,7 @@ func conditionallyCropImages(originalDataReaderSeeker io.ReadSeeker, ext string, var err error rs, err = images.Cropped(ext, rs, x1, y1, x2, y2) if err != nil { - glog.Errorf("Cropping images error: %s", err) + log.Errorf("Cropping images error: %s", err) } } return rs diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 7f0fcc871..46db10119 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/topology" @@ -17,7 +17,7 @@ import ( func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { if e := r.ParseForm(); e != nil { - glog.V(0).Infoln("form parse error:", e) + log.V(3).Infoln("form parse error:", e) writeJsonError(w, r, http.StatusBadRequest, e) return } @@ -25,7 +25,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { vid, fid, _, _, _ := parseURLPath(r.URL.Path) volumeId, ve := needle.NewVolumeId(vid) if ve != nil { - glog.V(0).Infoln("NewVolumeId error:", ve) + log.V(3).Infoln("NewVolumeId error:", ve) writeJsonError(w, r, http.StatusBadRequest, ve) return } @@ -81,7 +81,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { return } - // glog.V(2).Infof("volume %s deleting %s", vid, n) + // log.V(1).Infof("volume %s deleting %s", vid, n) cookie := n.Cookie @@ -102,7 +102,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { } if n.Cookie != cookie { - glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent()) + log.V(3).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent()) writeJsonError(w, r, http.StatusBadRequest, errors.New("File Random Cookie does not match.")) return } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index dbe6dfed5..fece54f69 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -20,7 +20,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/util/chunk_cache" "github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/security" ) @@ -174,7 +174,7 @@ func clearName(name string) (string, error) { func 
(fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error { - glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath) + log.V(1).Infof("WebDavFileSystem.Mkdir %v", fullDirPath) if !strings.HasSuffix(fullDirPath, "/") { fullDirPath += "/" @@ -208,7 +208,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm Signatures: []int32{fs.signature}, } - glog.V(1).Infof("mkdir: %v", request) + log.V(2).Infof("mkdir: %v", request) if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) } @@ -218,7 +218,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm } func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) { - glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag) + log.V(1).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag) var err error if fullFilePath, err = clearName(fullFilePath); err != nil { @@ -305,14 +305,14 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error { - glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name) + log.V(1).Infof("WebDavFileSystem.RemoveAll %v", name) return fs.removeAll(ctx, name) } func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) error { - glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName) + log.V(1).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName) var err error if oldName, err = clearName(oldName); err != nil { @@ -397,7 +397,7 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F } func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) { - glog.V(2).Infof("WebDavFileSystem.Stat %v", name) + log.V(1).Infof("WebDavFileSystem.Stat %v", name) return fs.stat(ctx, name) } @@ -405,7 +405,7 @@ func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) { uploader, uploaderErr := operation.NewUploader() if uploaderErr != nil { - glog.V(0).Infof("upload data %v: %v", f.name, uploaderErr) + log.V(3).Infof("upload data %v: %v", f.name, uploaderErr) return nil, fmt.Errorf("upload data: %v", uploaderErr) } @@ -432,11 +432,11 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64 ) if flushErr != nil { - glog.V(0).Infof("upload data %v: %v", f.name, flushErr) + log.V(3).Infof("upload data %v: %v", f.name, flushErr) return nil, fmt.Errorf("upload data: %v", flushErr) } if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v: %v", f.name, flushErr) + log.V(3).Infof("upload failure %v: %v", f.name, flushErr) return nil, fmt.Errorf("upload result: %v", uploadResult.Error) } return uploadResult.ToPbFileChunk(fileId, offset, tsNs), nil @@ -444,7 +444,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64 func (f *WebDavFile) Write(buf []byte) (int, error) { - glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + log.V(1).Infof("WebDavFileSystem.Write %v", f.name) fullPath := util.FullPath(f.name) dir, _ := fullPath.DirAndName() @@ -471,7 +471,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { if flushErr != nil { if f.entry.Attributes.Mtime == 0 { if err := 
f.fs.removeAll(ctx, f.name); err != nil { - glog.Errorf("bufWriter.Flush remove file error: %+v", f.name) + log.Errorf("bufWriter.Flush remove file error: %+v", f.name) } } return fmt.Errorf("%s upload result: %v", f.name, flushErr) @@ -487,7 +487,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { manifestedChunks, manifestErr := filer.MaybeManifestize(f.saveDataAsChunk, f.entry.GetChunks()) if manifestErr != nil { // not good, but should be ok - glog.V(0).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr) + log.V(3).Infof("file %s close MaybeManifestize: %v", f.name, manifestErr) } else { f.entry.Chunks = manifestedChunks } @@ -515,7 +515,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { if err == nil { f.entry.Attributes.FileSize = uint64(max(f.off+int64(written), int64(f.entry.Attributes.FileSize))) - glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) + log.V(0).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) f.off += int64(written) } @@ -524,7 +524,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { func (f *WebDavFile) Close() error { - glog.V(2).Infof("WebDavFileSystem.Close %v", f.name) + log.V(1).Infof("WebDavFileSystem.Close %v", f.name) if f.bufWriter == nil { return nil } @@ -540,7 +540,7 @@ func (f *WebDavFile) Close() error { func (f *WebDavFile) Read(p []byte) (readSize int, err error) { - glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) + log.V(1).Infof("WebDavFileSystem.Read %v", f.name) if f.entry == nil { f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) @@ -566,11 +566,11 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { readSize, err = f.reader.ReadAt(p, f.off) - glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) + log.V(0).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) f.off += int64(readSize) if err != nil && err != io.EOF { - glog.Errorf("file read %s: %v", f.name, err) + log.Errorf("file read %s: %v", f.name, err) } return @@ -579,7 +579,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { - glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) + log.V(1).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) dir, _ := util.FullPath(f.name).DirAndName() @@ -595,7 +595,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { if !strings.HasSuffix(fi.name, "/") && fi.IsDir() { fi.name += "/" } - glog.V(4).Infof("entry: %v", fi.name) + log.V(-1).Infof("entry: %v", fi.name) ret = append(ret, &fi) return nil }) @@ -625,7 +625,7 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) { - glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence) + log.V(1).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence) ctx := context.Background() @@ -646,7 +646,7 @@ func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) { func (f *WebDavFile) Stat() (os.FileInfo, error) { - glog.V(2).Infof("WebDavFile.Stat %v", f.name) + log.V(1).Infof("WebDavFile.Stat %v", f.name) ctx := context.Background() diff --git a/weed/sftpd/sftp_filer.go b/weed/sftpd/sftp_filer.go index dbe6a438d..a0d86ebb4 100644 --- a/weed/sftpd/sftp_filer.go +++ b/weed/sftpd/sftp_filer.go @@ -17,7 +17,7 @@ import ( "github.com/pkg/sftp" 
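After the renumbering, the per-read and per-write traces in WebDavFile sit at V(0) and V(-1), which are normally disabled. The guard suppresses formatting, but arguments are still evaluated at the call site, so expensive values should be computed behind an explicit check. A self-contained sketch of the pattern; the threshold, types, and expensiveSummary are stand-ins:

package main

import "fmt"

type Verbose bool

// assume the configured threshold is 0, so V(-1) is disabled
func V(level int) Verbose { return Verbose(level >= 0) }

func (v Verbose) Infof(format string, args ...interface{}) {
	if v {
		fmt.Printf(format+"\n", args...)
	}
}

func expensiveSummary() string {
	fmt.Println("summary computed") // side effect makes the hidden cost visible
	return "42 chunks"
}

func main() {
	V(-1).Infof("trace: %s", expensiveSummary()) // skipped, but argument still evaluated
	if V(-1) {                                   // cheaper: check the guard first
		V(-1).Infof("trace: %s", expensiveSummary())
	}
}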
"github.com/seaweedfs/seaweedfs/weed/filer" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" weed_server "github.com/seaweedfs/seaweedfs/weed/server" @@ -90,7 +90,7 @@ func (fs *SftpServer) withTimeoutContext(fn func(ctx context.Context) error) err // ==================== Command Dispatcher ==================== func (fs *SftpServer) dispatchCmd(r *sftp.Request) error { - glog.V(0).Infof("Dispatch: %s %s", r.Method, r.Filepath) + log.V(3).Infof("Dispatch: %s %s", r.Method, r.Filepath) switch r.Method { case "Remove": return fs.removeEntry(r) @@ -196,7 +196,7 @@ func (fs *SftpServer) putFile(filepath string, data []byte, user *user.User) err if err != nil { // Log the error but don't fail the whole operation - glog.Errorf("Failed to update file ownership for %s: %v", filepath, err) + log.Errorf("Failed to update file ownership for %s: %v", filepath, err) } } diff --git a/weed/sftpd/sftp_helpers.go b/weed/sftpd/sftp_helpers.go index 0545528b9..23af4f0c0 100644 --- a/weed/sftpd/sftp_helpers.go +++ b/weed/sftpd/sftp_helpers.go @@ -8,7 +8,7 @@ import ( "time" "github.com/pkg/sftp" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -117,7 +117,7 @@ func (w *filerFileWriter) Close() error { // Check permissions based on file metadata and user permissions if err := w.fs.checkFilePermission(dir, "write"); err != nil { - glog.Errorf("Permission denied for %s", dir) + log.Errorf("Permission denied for %s", dir) return err } diff --git a/weed/sftpd/sftp_service.go b/weed/sftpd/sftp_service.go index 76cba305c..580c09a61 100644 --- a/weed/sftpd/sftp_service.go +++ b/weed/sftpd/sftp_service.go @@ -5,19 +5,18 @@ import ( "context" "fmt" "io" - "log" "net" "os" "path/filepath" "time" "github.com/pkg/sftp" - "github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/pb" filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/sftpd/auth" "github.com/seaweedfs/seaweedfs/weed/sftpd/user" "github.com/seaweedfs/seaweedfs/weed/util" + "github.com/seaweedfs/seaweedfs/weed/util/log" "golang.org/x/crypto/ssh" "google.golang.org/grpc" ) @@ -60,7 +59,7 @@ func NewSFTPService(options *SFTPServiceOptions) *SFTPService { // Initialize user store userStore, err := user.NewFileStore(options.UserStoreFile) if err != nil { - glog.Fatalf("Failed to initialize user store: %v", err) + log.Fatalf("Failed to initialize user store: %v", err) } service.userStore = userStore @@ -166,7 +165,7 @@ func (s *SFTPService) Serve(listener net.Listener) error { return fmt.Errorf("failed to create SSH config: %v", err) } - glog.V(0).Infof("Starting Seaweed SFTP service on %s", listener.Addr().String()) + log.V(3).Infof("Starting Seaweed SFTP service on %s", listener.Addr().String()) for { conn, err := listener.Accept() @@ -247,7 +246,7 @@ func (s *SFTPService) addHostKey(config *ssh.ServerConfig, keyPath string) error return fmt.Errorf("failed to parse host key %s: %v", keyPath, err) } config.AddHostKey(signer) - glog.V(0).Infof("Added host key %s (%s)", keyPath, signer.PublicKey().Type()) + log.V(3).Infof("Added host key %s (%s)", keyPath, signer.PublicKey().Type()) return nil } @@ -259,7 +258,7 @@ func (s *SFTPService) handleSSHConnection(conn net.Conn, config *ssh.ServerConfi // Perform SSH handshake sshConn, chans, reqs, err := 
ssh.NewServerConn(conn, config) if err != nil { - glog.Errorf("Failed to handshake: %v", err) + log.Errorf("Failed to handshake: %v", err) conn.Close() return } @@ -275,13 +274,13 @@ func (s *SFTPService) handleSSHConnection(conn net.Conn, config *ssh.ServerConfi go s.monitorConnection(ctx, sshConn) username := sshConn.Permissions.Extensions["username"] - glog.V(0).Infof("New SSH connection from %s (%s) as user %s", + log.V(3).Infof("New SSH connection from %s (%s) as user %s", sshConn.RemoteAddr(), sshConn.ClientVersion(), username) // Get user from store sftpUser, err := s.authManager.GetUser(username) if err != nil { - glog.Errorf("Failed to retrieve user %s: %v", username, err) + log.Errorf("Failed to retrieve user %s: %v", username, err) sshConn.Close() return } @@ -297,7 +296,7 @@ func (s *SFTPService) handleSSHConnection(conn net.Conn, config *ssh.ServerConfi // Ensure home directory exists with proper permissions if err := s.homeManager.EnsureHomeDirectory(sftpUser); err != nil { - glog.Errorf("Failed to ensure home directory for user %s: %v", username, err) + log.Errorf("Failed to ensure home directory for user %s: %v", username, err) // We don't close the connection here, as the user might still be able to access other directories } @@ -328,11 +327,11 @@ func (s *SFTPService) monitorConnection(ctx context.Context, sshConn *ssh.Server _, _, err := sshConn.SendRequest("keepalive@openssh.com", true, nil) if err != nil { missedCount++ - glog.V(0).Infof("Keep-alive missed for %s: %v (%d/%d)", + log.V(3).Infof("Keep-alive missed for %s: %v (%d/%d)", sshConn.RemoteAddr(), err, missedCount, s.options.ClientAliveCountMax) if missedCount >= s.options.ClientAliveCountMax { - glog.Warningf("Closing unresponsive connection from %s", sshConn.RemoteAddr()) + log.Warningf("Closing unresponsive connection from %s", sshConn.RemoteAddr()) sshConn.Close() return } @@ -352,7 +351,7 @@ func (s *SFTPService) handleChannel(newChannel ssh.NewChannel, fs *SftpServer) { channel, requests, err := newChannel.Accept() if err != nil { - glog.Errorf("Could not accept channel: %v", err) + log.Errorf("Could not accept channel: %v", err) return } @@ -387,8 +386,8 @@ func (s *SFTPService) handleSFTP(channel ssh.Channel, fs *SftpServer) { if err := server.Serve(); err == io.EOF { server.Close() - glog.V(0).Info("SFTP client exited session.") + log.V(3).Info("SFTP client exited session.") } else if err != nil { - glog.Errorf("SFTP server finished with error: %v", err) + log.Errorf("SFTP server finished with error: %v", err) } } diff --git a/weed/sftpd/user/homemanager.go b/weed/sftpd/user/homemanager.go index c9051939c..b694e8f85 100644 --- a/weed/sftpd/user/homemanager.go +++ b/weed/sftpd/user/homemanager.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -38,7 +38,7 @@ func (hm *HomeManager) EnsureHomeDirectory(user *User) error { return fmt.Errorf("user has no home directory configured") } - glog.V(0).Infof("Ensuring home directory exists for user %s: %s", user.Username, user.HomeDir) + log.V(3).Infof("Ensuring home directory exists for user %s: %s", user.Username, user.HomeDir) // Check if home directory exists and create it if needed err := hm.createDirectoryIfNotExists(user.HomeDir, user) @@ -54,7 +54,7 @@ func (hm *HomeManager) EnsureHomeDirectory(user *User) error { // Only add permissions if not already present if _, 
exists := user.Permissions[user.HomeDir]; !exists { user.Permissions[user.HomeDir] = []string{"all"} - glog.V(0).Infof("Added full permissions for user %s to home directory %s", + log.V(3).Infof("Added full permissions for user %s to home directory %s", user.Username, user.HomeDir) } @@ -102,7 +102,7 @@ func (hm *HomeManager) createSingleDirectory(dirPath string, user *User) error { if err != nil || resp.Entry == nil { // Directory doesn't exist, create it - glog.V(0).Infof("Creating directory %s for user %s", dirPath, user.Username) + log.V(3).Infof("Creating directory %s for user %s", dirPath, user.Username) err = filer_pb.Mkdir(hm, string(dir), name, func(entry *filer_pb.Entry) { // Set appropriate permissions @@ -133,7 +133,7 @@ func (hm *HomeManager) createSingleDirectory(dirPath string, user *User) error { // Update ownership if needed if resp.Entry.Attributes.Uid != user.Uid || resp.Entry.Attributes.Gid != user.Gid { - glog.V(0).Infof("Updating ownership of directory %s for user %s", dirPath, user.Username) + log.V(3).Infof("Updating ownership of directory %s for user %s", dirPath, user.Username) entry := resp.Entry entry.Attributes.Uid = user.Uid @@ -145,7 +145,7 @@ func (hm *HomeManager) createSingleDirectory(dirPath string, user *User) error { }) if updateErr != nil { - glog.Warningf("Failed to update directory ownership: %v", updateErr) + log.Warningf("Failed to update directory ownership: %v", updateErr) } } } diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index a6f27232e..15685e113 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -9,7 +9,7 @@ import ( "sort" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" @@ -306,7 +306,7 @@ func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption, if targetAddress != existingLocation { copiedShardIds = shardIdsToCopy - glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds) + log.V(3).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds) } return nil diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index db2fac469..7cb87dcad 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -7,7 +7,7 @@ import ( "io" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/wdclient" @@ -94,7 +94,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr nodeCount++ }) if nodeCount < erasure_coding.ParityShardsCount { - glog.V(0).Infof("skip erasure coding with %d nodes, less than recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount) + log.V(3).Infof("skip erasure coding with %d nodes, less than recommended %d nodes", nodeCount, erasure_coding.ParityShardsCount) return nil } } @@ -275,13 +275,13 @@ func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection stri if good, found := vidMap[v.Id]; found { if good { if diskInfo.FreeVolumeCount < 2 { - glog.V(0).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id) + log.V(3).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id) vidMap[v.Id] = false } } } else { if diskInfo.FreeVolumeCount < 2 { - 
glog.V(0).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id) + log.V(3).Infof("skip %s %d on %s, no free disk", v.Collection, v.Id, dn.Id) vidMap[v.Id] = false } else { vidMap[v.Id] = true diff --git a/weed/shell/command_volume_tier_move.go b/weed/shell/command_volume_tier_move.go index 46da39eef..b00d7502a 100644 --- a/weed/shell/command_volume_tier_move.go +++ b/weed/shell/command_volume_tier_move.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -246,7 +246,7 @@ func (c *commandVolumeTierMove) doMoveOneVolume(commandEnv *CommandEnv, writer i if err = LiveMoveVolume(commandEnv.option.GrpcDialOption, writer, vid, sourceVolumeServer, newAddress, 5*time.Second, toDiskType.ReadableString(), ioBytePerSecond, true); err != nil { // mark all replicas as writable if err = markVolumeReplicasWritable(commandEnv.option.GrpcDialOption, vid, locations, true, false); err != nil { - glog.Errorf("mark volume %d as writable on %s: %v", vid, locations[0].Url, err) + log.Errorf("mark volume %d as writable on %s: %v", vid, locations[0].Url, err) } return fmt.Errorf("move volume %d %s => %s : %v", vid, locations[0].Url, dst.dataNode.Id, err) @@ -268,7 +268,7 @@ func (c *commandVolumeTierMove) doMoveOneVolume(commandEnv *CommandEnv, writer i return nil }) if err != nil { - glog.Errorf("update volume %d replication on %s: %v", vid, locations[0].Url, err) + log.Errorf("update volume %d replication on %s: %v", vid, locations[0].Url, err) } } diff --git a/weed/stats/disk.go b/weed/stats/disk.go index 5a8699a54..db113461e 100644 --- a/weed/stats/disk.go +++ b/weed/stats/disk.go @@ -1,7 +1,7 @@ package stats import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" ) @@ -9,7 +9,7 @@ func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) { disk = &volume_server_pb.DiskStatus{Dir: path} fillInDiskStatus(disk) if disk.PercentUsed > 95 { - glog.V(0).Infof("disk status: %v", disk) + log.V(3).Infof("disk status: %v", disk) } return } diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go index 26164ffc3..ae3f3fa24 100644 --- a/weed/stats/metrics.go +++ b/weed/stats/metrics.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/push" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) // Readonly volume types @@ -406,14 +406,14 @@ func LoopPushingMetric(name, instance, addr string, intervalSeconds int) { return } - glog.V(0).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds) + log.V(3).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds) pusher := push.New(addr, name).Gatherer(Gather).Grouping("instance", instance) for { err := pusher.Push() if err != nil && !strings.HasPrefix(err.Error(), "unexpected status code 200") { - glog.V(0).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err) + log.V(3).Infof("could not push metrics to prometheus push gateway %s: %v", addr, err) } if intervalSeconds <= 0 { intervalSeconds = 15 @@ -435,7 +435,7 @@ func StartMetricsServer(ip string, port int) { 
return } http.Handle("/metrics", promhttp.HandlerFor(Gather, promhttp.HandlerOpts{})) - glog.Fatal(http.ListenAndServe(JoinHostPort(ip, port), nil)) + log.Fatal(http.ListenAndServe(JoinHostPort(ip, port), nil)) } func SourceName(port uint32) string { @@ -461,7 +461,7 @@ func DeleteCollectionMetrics(collection string) { c += VolumeServerVolumeGauge.DeletePartialMatch(labels) c += VolumeServerReadOnlyVolumeGauge.DeletePartialMatch(labels) - glog.V(0).Infof("delete collection metrics, %s: %d", collection, c) + log.V(3).Infof("delete collection metrics, %s: %d", collection, c) } func bucketMetricTTLControl() { @@ -482,7 +482,7 @@ func bucketMetricTTLControl() { c += S3BucketTrafficSentBytesCounter.DeletePartialMatch(labels) c += S3DeletedObjectsCounter.DeletePartialMatch(labels) c += S3UploadedObjectsCounter.DeletePartialMatch(labels) - glog.V(0).Infof("delete inactive bucket metrics, %s: %d", bucket, c) + log.V(3).Infof("delete inactive bucket metrics, %s: %d", bucket, c) } } diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index c17bec822..4a2553c0a 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" ) @@ -52,7 +52,7 @@ func LoadConfiguration(config *util.ViperProxy) { for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] if !found { - glog.Fatalf("backend storage type %s not found", backendTypeName) + log.Fatalf("backend storage type %s not found", backendTypeName) } for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." 
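Only leveled Info calls are renumbered in this change; Errorf, Warningf, Fatal, and Fatalf keep their glog names and contracts, and callers such as LoadConfiguration and StartMetricsServer rely on Fatal terminating the process. Extending the earlier guard sketch with the assumed non-leveled surface; signatures mirror glog and are not a verified listing of util/log:

package log

import (
	"fmt"
	"os"
)

func Errorf(format string, args ...interface{})   { emit("E", format, args...) }
func Warningf(format string, args ...interface{}) { emit("W", format, args...) }

// Fatal and Fatalf must not return: callers assume execution stops
// on a fatal configuration or startup error.
func Fatal(args ...interface{}) {
	fmt.Fprint(os.Stderr, "F ")
	fmt.Fprintln(os.Stderr, args...)
	os.Exit(1)
}

func Fatalf(format string, args ...interface{}) {
	emit("F", format, args...)
	os.Exit(1)
}

func emit(severity, format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, severity+" "+format+"\n", args...)
}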
+ backendStorageId + ".enabled") { @@ -64,7 +64,7 @@ func LoadConfiguration(config *util.ViperProxy) { backendStorage, buildErr := backendStorageFactory.BuildStorage(config, StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) if buildErr != nil { - glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) + log.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) } BackendStorages[backendTypeName+"."+backendStorageId] = backendStorage if backendStorageId == "default" { @@ -81,7 +81,7 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { for _, storageBackend := range storageBackends { backendStorageFactory, found := BackendStorageFactories[StorageType(storageBackend.Type)] if !found { - glog.Warningf("storage type %s not found", storageBackend.Type) + log.Warningf("storage type %s not found", storageBackend.Type) continue } if _, found := BackendStorages[storageBackend.Type+"."+storageBackend.Id]; found { @@ -89,7 +89,7 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { } backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) if buildErr != nil { - glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) + log.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) } BackendStorages[storageBackend.Type+"."+storageBackend.Id] = backendStorage if storageBackend.Id == "default" { diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index 070f79865..d1ac5704c 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -1,7 +1,7 @@ package backend import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" "io" "os" @@ -25,7 +25,7 @@ type DiskFile struct { func NewDiskFile(f *os.File) *DiskFile { stat, err := f.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", f.Name(), err) + log.Fatalf("stat file %s: %v", f.Name(), err) } offset := stat.Size() if offset%NeedlePaddingSize != 0 { diff --git a/weed/storage/backend/rclone_backend/rclone_backend.go b/weed/storage/backend/rclone_backend/rclone_backend.go index e47c2f908..705a761ba 100644 --- a/weed/storage/backend/rclone_backend/rclone_backend.go +++ b/weed/storage/backend/rclone_backend/rclone_backend.go @@ -21,7 +21,7 @@ import ( "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/object" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" ) @@ -66,11 +66,11 @@ func newRcloneBackendStorage(configuration backend.StringProperties, configPrefi fsPath := fmt.Sprintf("%s:", s.remoteName) s.fs, err = fs.NewFs(ctx, fsPath) if err != nil { - glog.Errorf("failed to instantiate Rclone filesystem: %s", err) + log.Errorf("failed to instantiate Rclone filesystem: %s", err) return } - glog.V(0).Infof("created backend storage rclone.%s for remote name %s", s.id, s.remoteName) + log.V(3).Infof("created backend storage rclone.%s for remote name %s", s.id, s.remoteName) return } @@ -118,7 +118,7 @@ func (s *RcloneBackendStorage) CopyFile(f *os.File, fn func(progressed int64, pe return key, 0, err } - glog.V(1).Infof("copy dat file of %s to remote rclone.%s as %s", f.Name(), s.id, key) + log.V(2).Infof("copy dat file of %s to remote rclone.%s as %s", f.Name(), s.id, key) util.Retry("upload via Rclone", func() error { size, err = uploadViaRclone(s.fs, f.Name(), key, fn) @@ -164,7 +164,7 @@ func uploadViaRclone(rfs fs.Fs, filename string, key string, fn func(progressed } func (s *RcloneBackendStorage) DownloadFile(filename string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { - glog.V(1).Infof("download dat file of %s from remote rclone.%s as %s", filename, s.id, key) + log.V(2).Infof("download dat file of %s from remote rclone.%s as %s", filename, s.id, key) util.Retry("download via Rclone", func() error { size, err = downloadViaRclone(s.fs, filename, key, fn) @@ -216,7 +216,7 @@ func downloadViaRclone(fs fs.Fs, filename string, key string, fn func(progressed } func (s *RcloneBackendStorage) DeleteFile(key string) (err error) { - glog.V(1).Infof("delete dat file %s from remote", key) + log.V(2).Infof("delete dat file %s from remote", key) util.Retry("delete via Rclone", func() error { err = deleteViaRclone(s.fs, key) diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 139073fe4..33e876bcf 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" ) @@ -60,7 +60,7 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint, s.forcePathStyle) - 
glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) + log.V(3).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) return } @@ -94,7 +94,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen randomUuid, _ := uuid.NewRandom() key = randomUuid.String() - glog.V(1).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) + log.V(2).Infof("copying dat file of %s to remote s3.%s as %s", f.Name(), s.id, key) util.Retry("upload to S3", func() error { size, err = uploadToS3(s.conn, f.Name(), s.bucket, key, s.storageClass, fn) @@ -106,7 +106,7 @@ func (s *S3BackendStorage) CopyFile(f *os.File, fn func(progressed int64, percen func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(progressed int64, percentage float32) error) (size int64, err error) { - glog.V(1).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) + log.V(2).Infof("download dat file of %s from remote s3.%s as %s", fileName, s.id, key) size, err = downloadFromS3(s.conn, fileName, s.bucket, key, fn) @@ -115,7 +115,7 @@ func (s *S3BackendStorage) DownloadFile(fileName string, key string, fn func(pro func (s *S3BackendStorage) DeleteFile(key string) (err error) { - glog.V(1).Infof("delete dat file %s from remote", key) + log.V(2).Infof("delete dat file %s from remote", key) err = deleteFromS3(s.conn, s.bucket, key) @@ -143,8 +143,8 @@ func (s3backendStorageFile S3BackendStorageFile) ReadAt(p []byte, off int64) (n } defer getObjectOutput.Body.Close() - // glog.V(3).Infof("read %s %s", s3backendStorageFile.key, bytesRange) - // glog.V(3).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) + // log.V(0).Infof("read %s %s", s3backendStorageFile.key, bytesRange) + // log.V(0).Infof("content range: %s, contentLength: %d", *getObjectOutput.ContentRange, *getObjectOutput.ContentLength) var readCount int for { diff --git a/weed/storage/backend/s3_backend/s3_download.go b/weed/storage/backend/s3_backend/s3_download.go index b0d30fbdb..5caca47c7 100644 --- a/weed/storage/backend/s3_backend/s3_download.go +++ b/weed/storage/backend/s3_backend/s3_download.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string, sourceKey string, @@ -50,7 +50,7 @@ func downloadFromS3(sess s3iface.S3API, destFileName string, sourceBucket string return fileSize, fmt.Errorf("failed to download /buckets/%s%s to %s: %v", sourceBucket, sourceKey, destFileName, err) } - glog.V(1).Infof("downloaded file %s\n", destFileName) + log.V(2).Infof("downloaded file %s\n", destFileName) return } diff --git a/weed/storage/backend/s3_backend/s3_upload.go b/weed/storage/backend/s3_backend/s3_upload.go index 537e6bd1d..26c88db29 100644 --- a/weed/storage/backend/s3_backend/s3_upload.go +++ b/weed/storage/backend/s3_backend/s3_upload.go @@ -8,7 +8,7 @@ import ( "os" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func uploadToS3(sess s3iface.S3API, filename string, destBucket string, destKey string, storageClass string, fn func(progressed int64, percentage float32) error) (fileSize int64, err error) { @@ -58,7 +58,7 @@ func uploadToS3(sess s3iface.S3API, 
filename string, destBucket string, destKey if err != nil { return 0, fmt.Errorf("failed to upload file %s: %v", filename, err) } - glog.V(1).Infof("file %s uploaded to %s\n", filename, result.Location) + log.V(2).Infof("file %s uploaded to %s\n", filename, result.Location) return } diff --git a/weed/storage/backend/volume_create.go b/weed/storage/backend/volume_create.go index def376822..ad1665071 100644 --- a/weed/storage/backend/volume_create.go +++ b/weed/storage/backend/volume_create.go @@ -6,7 +6,7 @@ package backend import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { @@ -15,7 +15,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32 return nil, e } if preallocate > 0 { - glog.V(2).Infof("Preallocated disk space for %s is not supported", fileName) + log.V(1).Infof("Preallocated disk space for %s is not supported", fileName) } return NewDiskFile(file), nil } diff --git a/weed/storage/backend/volume_create_linux.go b/weed/storage/backend/volume_create_linux.go index 2e52cce1b..2319c8c5b 100644 --- a/weed/storage/backend/volume_create_linux.go +++ b/weed/storage/backend/volume_create_linux.go @@ -7,7 +7,7 @@ import ( "os" "syscall" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { @@ -17,7 +17,7 @@ func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32 } if preallocate != 0 { syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) - glog.V(1).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) + log.V(2).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) } return NewDiskFile(file), nil } diff --git a/weed/storage/backend/volume_create_windows.go b/weed/storage/backend/volume_create_windows.go index b2e0ea92b..fbd1840e9 100644 --- a/weed/storage/backend/volume_create_windows.go +++ b/weed/storage/backend/volume_create_windows.go @@ -7,13 +7,13 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/backend/memory_map" "golang.org/x/sys/windows" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend/memory_map/os_overloads" ) func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) { if preallocate > 0 { - glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) + log.V(3).Infof("Preallocated disk space for %s is not supported", fileName) } if memoryMapSizeMB > 0 { diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index cc89c4ca1..459708e49 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -11,7 +11,7 @@ import ( "time" "github.com/google/uuid" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -39,7 +39,7 @@ type DiskLocation struct { } func GenerateDirUuid(dir string) (dirUuidString string, err error) { - glog.V(1).Infof("Getting uuid of volume directory:%s", dir) + log.V(2).Infof("Getting uuid of volume directory:%s", dir) fileName := dir + 
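Separate from the rename: the Linux CreateVolumeFile hunk above still discards the syscall.Fallocate result, so the Preallocated log line can report success after a failed preallocation. A hedged sketch of checking it, using the levels from this change; preallocateVolume is a hypothetical helper, not code in the tree:

package backend

import (
	"syscall"

	"github.com/seaweedfs/seaweedfs/weed/util/log"
)

// preallocateVolume reserves space with FALLOC_FL_KEEP_SIZE (mode 1), as the
// current code does, but surfaces the error instead of dropping it. Linux only.
func preallocateVolume(fd uintptr, fileName string, size int64) {
	if err := syscall.Fallocate(int(fd), 1, 0, size); err != nil {
		log.Warningf("fallocate %s (%d bytes): %v", fileName, size, err)
	} else {
		log.V(2).Infof("Preallocated %d bytes disk space for %s", size, fileName)
	}
}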
"/vol_dir.uuid" if !util.FileExists(fileName) { dirUuidString, err = writeNewUuid(fileName) @@ -67,7 +67,7 @@ func writeNewUuid(fileName string) (string, error) { } func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation { - glog.V(4).Infof("Added new Disk %s: maxVolumes=%d", dir, maxVolumeCount) + log.V(-1).Infof("Added new Disk %s: maxVolumes=%d", dir, maxVolumeCount) dir = util.ResolvePath(dir) if idxDir == "" { idxDir = dir @@ -76,7 +76,7 @@ func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFree } dirUuid, err := GenerateDirUuid(dir) if err != nil { - glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err) + log.Fatalf("cannot generate uuid of dir %s: %v", dir, err) } location := &DiskLocation{ Directory: dir, @@ -155,7 +155,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne noteFile := l.Directory + "/" + volumeName + ".note" if util.FileExists(noteFile) { note, _ := os.ReadFile(noteFile) - glog.Warningf("volume %s was not completed: %s", volumeName, string(note)) + log.Warningf("volume %s was not completed: %s", volumeName, string(note)) removeVolumeFiles(l.Directory + "/" + volumeName) removeVolumeFiles(l.IdxDirectory + "/" + volumeName) return false @@ -164,7 +164,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne // parse out collection, volume id vid, collection, err := volumeIdFromFileName(basename) if err != nil { - glog.Warningf("get volume id failed, %s, err : %s", volumeName, err) + log.Warningf("get volume id failed, %s, err : %s", volumeName, err) return false } @@ -173,21 +173,21 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne _, found := l.volumes[vid] l.volumesLock.RUnlock() if found { - glog.V(1).Infof("loaded volume, %v", vid) + log.V(2).Infof("loaded volume, %v", vid) return true } // load the volume v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout) if e != nil { - glog.V(0).Infof("new volume %s error %s", volumeName, e) + log.V(3).Infof("new volume %s error %s", volumeName, e) return false } l.SetVolume(vid, v) size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", + log.V(3).Infof("data file %s, replication=%s v=%d size=%d ttl=%s", l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) return true } @@ -234,7 +234,7 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeo num, err := strconv.Atoi(val) if err != nil || num < 1 { num = 10 - glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10") + log.Warningf("failed to set worker number from GOMAXPROCS , set to default:10") } workerNum = num } else { @@ -243,10 +243,10 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeo } } l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout) - glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount) + log.V(3).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount) l.loadAllEcShards() - glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes)) + log.V(3).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes)) } @@ -434,7 +434,7 @@ func (l *DiskLocation) 
UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) } datSize, idxSize, _ := vol.FileStat() unUsedSpaceVolume := int64(volumeSizeLimit) - int64(datSize+idxSize) - glog.V(4).Infof("Volume stats for %d: volumeSizeLimit=%d, datSize=%d idxSize=%d unused=%d", vol.Id, volumeSizeLimit, datSize, idxSize, unUsedSpaceVolume) + log.V(-1).Infof("Volume stats for %d: volumeSizeLimit=%d, datSize=%d idxSize=%d unused=%d", vol.Id, volumeSizeLimit, datSize, idxSize, unUsedSpaceVolume) if unUsedSpaceVolume >= 0 { unUsedSpace += uint64(unUsedSpaceVolume) } @@ -455,11 +455,11 @@ func (l *DiskLocation) CheckDiskSpace() { l.isDiskSpaceLow = !l.isDiskSpaceLow } - logLevel := glog.Level(4) + logLevel := log.Level(-1) if l.isDiskSpaceLow { - logLevel = glog.Level(0) + logLevel = log.Level(3) } - glog.V(logLevel).Infof("dir %s %s", dir, desc) + log.V(logLevel).Infof("dir %s %s", dir, desc) } } diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index a46643f57..6836382b0 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -7,7 +7,7 @@ import ( "github.com/klauspost/reedsolomon" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -78,7 +78,7 @@ func generateEcFiles(baseFileName string, bufferSize int, largeBlockSize int64, return fmt.Errorf("failed to stat dat file: %v", err) } - glog.V(0).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) + log.V(3).Infof("encodeDatFile %s.dat size:%d", baseFileName, fi.Size()) err = encodeDatFile(fi.Size(), baseFileName, bufferSize, largeBlockSize, file, smallBlockSize) if err != nil { return fmt.Errorf("encodeDatFile: %v", err) } @@ -121,12 +121,12 @@ func encodeData(file *os.File, enc reedsolomon.Encoder, startOffset, blockSize i bufferSize := int64(len(buffers[0])) if bufferSize == 0 { - glog.Fatal("unexpected zero buffer size") + log.Fatal("unexpected zero buffer size") } batchCount := blockSize / bufferSize if blockSize%bufferSize != 0 { - glog.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize) + log.Fatalf("unexpected block size %d buffer size %d", blockSize, bufferSize) } for b := int64(0); b < batchCount; b++ { diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index b3744807a..d14166502 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" @@ -74,7 +74,7 @@ func NewEcVolume(diskType types.DiskType, dir string, dirIdx string, collection ev.datFileSize = volumeInfo.DatFileSize ev.ExpireAtSec = volumeInfo.ExpireAtSec } else { - glog.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName) + log.Warningf("vif file not found,volumeId:%d, filename:%s", vid, dataBaseFileName) volume_info.SaveVolumeInfo(dataBaseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) } diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go index e31c44a46..aa3aa01bf 100644 --- a/weed/storage/idx/walk.go +++ b/weed/storage/idx/walk.go @@ -3,7
+3,7 @@ package idx import ( "io" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -16,7 +16,7 @@ func WalkIndexFile(r io.ReaderAt, startFrom uint64, fn func(key types.NeedleId, if count == 0 && e == io.EOF { return nil } - glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) + log.V(0).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) readerOffset += int64(count) var ( key types.NeedleId @@ -36,7 +36,7 @@ func WalkIndexFile(r io.ReaderAt, startFrom uint64, fn func(key types.NeedleId, return nil } count, e = r.ReadAt(bytes, readerOffset) - glog.V(3).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) + log.V(0).Infof("readerOffset %d count %d err: %v", readerOffset, count, e) readerOffset += int64(count) } return e diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go index 89708303d..1bc00e7d0 100644 --- a/weed/storage/needle/needle_parse_upload.go +++ b/weed/storage/needle/needle_parse_upload.go @@ -13,7 +13,7 @@ import ( "strconv" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/util" ) @@ -113,7 +113,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { form, fe := r.MultipartReader() if fe != nil { - glog.V(0).Infoln("MultipartReader [ERROR]", fe) + log.V(3).Infoln("MultipartReader [ERROR]", fe) e = fe return } @@ -121,7 +121,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { // first multi-part item part, fe := form.NextPart() if fe != nil { - glog.V(0).Infoln("Reading Multi part [ERROR]", fe) + log.V(3).Infoln("Reading Multi part [ERROR]", fe) e = fe return } @@ -133,7 +133,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { dataSize, e = pu.bytesBuffer.ReadFrom(io.LimitReader(part, sizeLimit+1)) if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) + log.V(3).Infoln("Reading Content [ERROR]", e) return } if dataSize == sizeLimit+1 { @@ -158,7 +158,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { pu.bytesBuffer.Reset() dataSize2, fe2 := pu.bytesBuffer.ReadFrom(io.LimitReader(part2, sizeLimit+1)) if fe2 != nil { - glog.V(0).Infoln("Reading Content [ERROR]", fe2) + log.V(3).Infoln("Reading Content [ERROR]", fe2) e = fe2 return } @@ -215,7 +215,7 @@ func parseUpload(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { dataSize, e = pu.bytesBuffer.ReadFrom(io.LimitReader(r.Body, sizeLimit+1)) if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) + log.V(3).Infoln("Reading Content [ERROR]", e) return } if dataSize == sizeLimit+1 { diff --git a/weed/storage/needle/needle_read.go b/weed/storage/needle/needle_read.go index 1907efad3..a84f1a5af 100644 --- a/weed/storage/needle/needle_read.go +++ b/weed/storage/needle/needle_read.go @@ -3,7 +3,7 @@ package needle import ( "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -42,7 +42,7 @@ func ReadNeedleBlob(r backend.BackendStorageFile, offset int64, size Size, versi } if err != nil { fileSize, _, _ := r.GetStat() - glog.Errorf("%s read %d dataSize %d offset %d fileSize %d: %v", r.Name(), n, dataSize, offset, fileSize, err) + log.Errorf("%s read %d dataSize %d offset %d fileSize %d: %v", r.Name(), n, dataSize, offset, fileSize, err) } return dataSlice, err @@ -55,7 +55,7 @@ func (n *Needle) ReadBytes(bytes []byte, offset int64, size Size, version Versio // cookie is not always passed in for this API. Use size to do preliminary checking. if OffsetSize == 4 && offset < int64(MaxPossibleVolumeSize) { stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorSizeMismatchOffsetSize).Inc() - glog.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) + log.Errorf("entry not found1: offset %d found id %x size %d, expected size %d", offset, n.Id, n.Size, size) return ErrorSizeMismatch } stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorSizeMismatch).Inc() @@ -238,7 +238,7 @@ func (n *Needle) ReadNeedleBody(r backend.BackendStorageFile, version Version, o err = nil } if err != nil { - glog.Errorf("%s read %d bodyLength %d offset %d: %v", r.Name(), readCount, bodyLength, offset, err) + log.Errorf("%s read %d bodyLength %d offset %d: %v", r.Name(), readCount, bodyLength, offset, err) return } diff --git a/weed/storage/needle/needle_read_page.go b/weed/storage/needle/needle_read_page.go index 4e1032de8..c8857223a 100644 --- a/weed/storage/needle/needle_read_page.go +++ b/weed/storage/needle/needle_read_page.go @@ -2,7 +2,7 @@ package needle import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . "github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util" @@ -26,7 +26,7 @@ func (n *Needle) ReadNeedleData(r backend.BackendStorageFile, volumeOffset int64 } if err != nil { fileSize, _, _ := r.GetStat() - glog.Errorf("%s read %d %d size %d at offset %d fileSize %d: %v", r.Name(), n.Id, needleOffset, sizeToRead, volumeOffset, fileSize, err) + log.Errorf("%s read %d %d size %d at offset %d fileSize %d: %v", r.Name(), n.Id, needleOffset, sizeToRead, volumeOffset, fileSize, err) } return diff --git a/weed/storage/needle/needle_write.go b/weed/storage/needle/needle_write.go index 95854bc27..de6071046 100644 --- a/weed/storage/needle/needle_write.go +++ b/weed/storage/needle/needle_write.go @@ -3,7 +3,7 @@ package needle import ( "bytes" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" "github.com/seaweedfs/seaweedfs/weed/util" @@ -112,7 +112,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u defer func(w backend.BackendStorageFile, off int64) { if err != nil { if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + log.V(3).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } } }(w, end) @@ -147,7 +147,7 @@ func WriteNeedleBlob(w backend.BackendStorageFile, dataSlice []byte, size Size, defer func(w backend.BackendStorageFile, off int64) { if err != nil { if te := w.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) + log.V(3).Infof("Failed to truncate %s back to %d with error: %v", w.Name(), end, te) } } }(w, end) diff --git a/weed/storage/needle_map/compact_map_test.go b/weed/storage/needle_map/compact_map_test.go index 58d2a6e3a..8fc0f1a74 100644 --- a/weed/storage/needle_map/compact_map_test.go +++ b/weed/storage/needle_map/compact_map_test.go @@ -79,7 +79,7 @@ func TestCompactMap(t *testing.T) { // for i := uint32(0); i < 100; i++ { // if v := m.Get(Key(i)); v != nil { - // glog.V(4).Infoln(i, "=", v.Key, v.Offset, v.Size) + // log.V(-1).Infoln(i, "=", v.Key, v.Offset, v.Size) // } // } diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index d3d47b605..a3cbd88ed 100644 --- a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -10,7 +10,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" . "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -26,7 +26,7 @@ func NewMemDb() *MemDb { var err error t := &MemDb{} if t.db, err = leveldb.Open(storage.NewMemStorage(), opts); err != nil { - glog.V(0).Infof("MemDb fails to open: %v", err) + log.V(3).Infof("MemDb fails to open: %v", err) return nil } diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index a5a543ba2..3448e7407 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -16,7 +16,7 @@ import ( "github.com/syndtr/goleveldb/leveldb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -43,16 +43,16 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option m = &LevelDbNeedleMap{dbFileName: dbFileName} m.indexFile = indexFile if !isLevelDbFresh(dbFileName, indexFile) { - glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) + log.V(2).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) generateLevelDbFile(dbFileName, indexFile) - glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) + log.V(2).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) } if stat, err := indexFile.Stat(); err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) } else { m.indexFileOffset = stat.Size() } - glog.V(1).Infof("Opening %s...", dbFileName) + log.V(2).Infof("Opening %s...", dbFileName) if m.ldbTimeout == 0 { if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil { @@ -63,12 +63,12 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option return } } - glog.V(0).Infof("Loading %s... , watermark: %d", dbFileName, getWatermark(m.db)) + log.V(3).Infof("Loading %s... , watermark: %d", dbFileName, getWatermark(m.db)) m.recordCount = uint64(m.indexFileOffset / NeedleMapEntrySize) watermark := (m.recordCount / watermarkBatchSize) * watermarkBatchSize err = setWatermark(m.db, watermark) if err != nil { - glog.Fatalf("set watermark for %s error: %s\n", dbFileName, err) + log.Fatalf("set watermark for %s error: %s\n", dbFileName, err) return } } @@ -97,7 +97,7 @@ func isLevelDbFresh(dbFileName string, indexFile *os.File) bool { dbStat, dbStatErr := dbLogFile.Stat() indexStat, indexStatErr := indexFile.Stat() if dbStatErr != nil || indexStatErr != nil { - glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + log.V(3).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) return false } @@ -113,13 +113,13 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { watermark := getWatermark(db) if stat, err := indexFile.Stat(); err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) return err } else { if watermark*NeedleMapEntrySize > uint64(stat.Size()) { - glog.Warningf("wrong watermark %d for filesize %d", watermark, stat.Size()) + log.Warningf("wrong watermark %d for filesize %d", watermark, stat.Size()) } - glog.V(0).Infof("generateLevelDbFile %s, watermark %d, num of entries:%d", dbFileName, watermark, (uint64(stat.Size())-watermark*NeedleMapEntrySize)/NeedleMapEntrySize) + log.V(3).Infof("generateLevelDbFile %s, watermark %d, num of entries:%d", dbFileName, watermark, (uint64(stat.Size())-watermark*NeedleMapEntrySize)/NeedleMapEntrySize) } return idx.WalkIndexFile(indexFile, watermark, func(key NeedleId, offset Offset, size Size) error { if !offset.IsZero() && size.IsValid() { @@ -175,7 +175,7 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { watermark = 0 } else { watermark = (m.recordCount / watermarkBatchSize) * watermarkBatchSize - glog.V(1).Infof("put cnt:%d for %s,watermark: %d", m.recordCount, m.dbFileName, watermark) + log.V(2).Infof("put cnt:%d for %s,watermark: %d", m.recordCount, m.dbFileName, watermark) } return levelDbWrite(m.db, key, offset, size, watermark == 0, watermark) } @@ -183,14 +183,14 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { 
func getWatermark(db *leveldb.DB) uint64 { data, err := db.Get(watermarkKey, nil) if err != nil || len(data) != 8 { - glog.V(1).Infof("read previous watermark from db: %v, %d", err, len(data)) + log.V(2).Infof("read previous watermark from db: %v, %d", err, len(data)) return 0 } return util.BytesToUint64(data) } func setWatermark(db *leveldb.DB, watermark uint64) error { - glog.V(3).Infof("set watermark %d", watermark) + log.V(0).Infof("set watermark %d", watermark) var wmBytes = make([]byte, 8) util.Uint64toBytes(wmBytes, watermark) if err := db.Put(watermarkKey, wmBytes, nil); err != nil { @@ -252,16 +252,16 @@ func (m *LevelDbNeedleMap) Close() { if m.indexFile != nil { indexFileName := m.indexFile.Name() if err := m.indexFile.Sync(); err != nil { - glog.Warningf("sync file %s failed: %v", indexFileName, err) + log.Warningf("sync file %s failed: %v", indexFileName, err) } if err := m.indexFile.Close(); err != nil { - glog.Warningf("close index file %s failed: %v", indexFileName, err) + log.Warningf("close index file %s failed: %v", indexFileName, err) } } if m.db != nil { if err := m.db.Close(); err != nil { - glog.Warningf("close levelDB failed: %v", err) + log.Warningf("close levelDB failed: %v", err) } } if m.ldbTimeout > 0 { @@ -309,7 +309,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * stat, e := indexFile.Stat() if e != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), e) + log.Fatalf("stat file %s: %v", indexFile.Name(), e) return e } m.indexFileOffset = stat.Size() @@ -319,7 +319,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * watermark := (m.recordCount / watermarkBatchSize) * watermarkBatchSize err = setWatermark(db, uint64(watermark)) if err != nil { - glog.Fatalf("setting watermark failed %s: %v", indexFile.Name(), err) + log.Fatalf("setting watermark failed %s: %v", indexFile.Name(), err) return err } v.nm = m @@ -335,7 +335,7 @@ func (m *LevelDbNeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts * } func (m *LevelDbNeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) (err error) { - glog.V(0).Infof("loading idx to leveldb from offset %d for file: %s", startFrom, indexFile.Name()) + log.V(3).Infof("loading idx to leveldb from offset %d for file: %s", startFrom, indexFile.Name()) dbFileName := v.FileName(".cpldb") db, dbErr := leveldb.OpenFile(dbFileName, nil) defer func() { @@ -404,14 +404,14 @@ func reloadLdb(m *LevelDbNeedleMap) (err error) { if m.db != nil { return nil } - glog.V(1).Infof("reloading leveldb %s", m.dbFileName) + log.V(2).Infof("reloading leveldb %s", m.dbFileName) m.accessFlag = 1 if m.db, err = leveldb.OpenFile(m.dbFileName, m.ldbOpts); err != nil { if errors.IsCorrupted(err) { m.db, err = leveldb.RecoverFile(m.dbFileName, m.ldbOpts) } if err != nil { - glog.Fatalf("RecoverFile %s failed:%v", m.dbFileName, err) + log.Fatalf("RecoverFile %s failed:%v", m.dbFileName, err) return err } } @@ -422,7 +422,7 @@ func unloadLdb(m *LevelDbNeedleMap) (err error) { m.ldbAccessLock.Lock() defer m.ldbAccessLock.Unlock() if m.db != nil { - glog.V(1).Infof("reached max idle count, unload leveldb, %s", m.dbFileName) + log.V(2).Infof("reached max idle count, unload leveldb, %s", m.dbFileName) m.db.Close() m.db = nil } @@ -430,26 +430,26 @@ func unloadLdb(m *LevelDbNeedleMap) (err error) { } func lazyLoadingRoutine(m *LevelDbNeedleMap) (err error) { - glog.V(1).Infof("lazyLoadingRoutine %s", m.dbFileName) + log.V(2).Infof("lazyLoadingRoutine %s", 
m.dbFileName) var accessRecord int64 accessRecord = 1 for { select { case exit := <-m.exitChan: if exit { - glog.V(1).Infof("exit from lazyLoadingRoutine") + log.V(2).Infof("exit from lazyLoadingRoutine") return nil } case <-time.After(time.Hour * 1): - glog.V(1).Infof("timeout %s", m.dbFileName) + log.V(2).Infof("timeout %s", m.dbFileName) if m.accessFlag == 0 { accessRecord++ - glog.V(1).Infof("accessRecord++") + log.V(2).Infof("accessRecord++") if accessRecord >= m.ldbTimeout { unloadLdb(m) } } else { - glog.V(1).Infof("reset accessRecord %s", m.dbFileName) + log.V(2).Infof("reset accessRecord %s", m.dbFileName) // reset accessRecord accessRecord = 0 } diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index c75514a31..c65104e37 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -3,7 +3,7 @@ package storage import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -22,7 +22,7 @@ func NewCompactNeedleMap(file *os.File) *NeedleMap { nm.indexFile = file stat, err := file.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", file.Name(), err) + log.Fatalf("stat file %s: %v", file.Name(), err) } nm.indexFileOffset = stat.Size() return nm @@ -51,7 +51,7 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) { } return nil }) - glog.V(1).Infof("max file key: %v count: %d deleted: %d for file: %s", nm.MaxFileKey(), nm.FileCount(), nm.DeletedCount(), file.Name()) + log.V(2).Infof("max file key: %v count: %d deleted: %d for file: %s", nm.MaxFileKey(), nm.FileCount(), nm.DeletedCount(), file.Name()) return nm, e } @@ -75,7 +75,7 @@ func (nm *NeedleMap) Close() { } indexFileName := nm.indexFile.Name() if err := nm.indexFile.Sync(); err != nil { - glog.Warningf("sync file %s failed, %v", indexFileName, err) + log.Warningf("sync file %s failed, %v", indexFileName, err) } _ = nm.indexFile.Close() } @@ -98,7 +98,7 @@ func (nm *NeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Op nm.indexFile = indexFile stat, err := indexFile.Stat() if err != nil { - glog.Fatalf("stat file %s: %v", indexFile.Name(), err) + log.Fatalf("stat file %s: %v", indexFile.Name(), err) return err } nm.indexFileOffset = stat.Size() @@ -108,7 +108,7 @@ func (nm *NeedleMap) UpdateNeedleMap(v *Volume, indexFile *os.File, opts *opt.Op } func (nm *NeedleMap) DoOffsetLoading(v *Volume, indexFile *os.File, startFrom uint64) error { - glog.V(0).Infof("loading idx from offset %d for file: %s", startFrom, indexFile.Name()) + log.V(3).Infof("loading idx from offset %d for file: %s", startFrom, indexFile.Name()) e := idx.WalkIndexFile(indexFile, startFrom, func(key NeedleId, offset Offset, size Size) error { nm.MaybeSetMaxFileKey(key) nm.FileCounter++ diff --git a/weed/storage/needle_map_metric.go b/weed/storage/needle_map_metric.go index d6d0a8730..58055f788 100644 --- a/weed/storage/needle_map_metric.go +++ b/weed/storage/needle_map_metric.go @@ -154,7 +154,7 @@ func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key for remainingCount >= 0 { n, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount) - // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e) + // log.V(3).Infoln("file", r.Name(), "readerOffset", 
NeedleMapEntrySize*remainingCount, "count", count, "e", e) if e == io.EOF && n == int(NeedleMapEntrySize*nextBatchSize) { e = nil } diff --git a/weed/storage/needle_map_metric_test.go b/weed/storage/needle_map_metric_test.go index 96919d103..0873aac65 100644 --- a/weed/storage/needle_map_metric_test.go +++ b/weed/storage/needle_map_metric_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" . "github.com/seaweedfs/seaweedfs/weed/storage/types" ) @@ -23,9 +23,9 @@ func TestFastLoadingNeedleMapMetrics(t *testing.T) { mm, _ := newNeedleMapMetricFromIndexFile(idxFile) - glog.V(0).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount()) - glog.V(0).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize()) - glog.V(0).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize()) - glog.V(0).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount()) - glog.V(0).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey()) + log.V(3).Infof("FileCount expected %d actual %d", nm.FileCount(), mm.FileCount()) + log.V(3).Infof("DeletedSize expected %d actual %d", nm.DeletedSize(), mm.DeletedSize()) + log.V(3).Infof("ContentSize expected %d actual %d", nm.ContentSize(), mm.ContentSize()) + log.V(3).Infof("DeletedCount expected %d actual %d", nm.DeletedCount(), mm.DeletedCount()) + log.V(3).Infof("MaxFileKey expected %d actual %d", nm.MaxFileKey(), mm.MaxFileKey()) } diff --git a/weed/storage/needle_map_sorted_file.go b/weed/storage/needle_map_sorted_file.go index 5bd67ea86..cf4885cdc 100644 --- a/weed/storage/needle_map_sorted_file.go +++ b/weed/storage/needle_map_sorted_file.go @@ -3,7 +3,7 @@ package storage import ( "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle_map" . 
"github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -21,18 +21,18 @@ func NewSortedFileNeedleMap(indexBaseFileName string, indexFile *os.File) (m *So m.indexFile = indexFile fileName := indexBaseFileName + ".sdx" if !isSortedFileFresh(fileName, indexFile) { - glog.V(0).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) + log.V(3).Infof("Start to Generate %s from %s", fileName, indexFile.Name()) erasure_coding.WriteSortedFileFromIdx(indexBaseFileName, ".sdx") - glog.V(0).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) + log.V(3).Infof("Finished Generating %s from %s", fileName, indexFile.Name()) } - glog.V(1).Infof("Opening %s...", fileName) + log.V(2).Infof("Opening %s...", fileName) if m.dbFile, err = os.OpenFile(indexBaseFileName+".sdx", os.O_RDWR, 0); err != nil { return } dbStat, _ := m.dbFile.Stat() m.dbFileSize = dbStat.Size() - glog.V(1).Infof("Loading %s...", indexFile.Name()) + log.V(2).Infof("Loading %s...", indexFile.Name()) mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) if indexLoadError != nil { _ = m.dbFile.Close() @@ -52,7 +52,7 @@ func isSortedFileFresh(dbFileName string, indexFile *os.File) bool { dbStat, dbStatErr := dbFile.Stat() indexStat, indexStatErr := indexFile.Stat() if dbStatErr != nil || indexStatErr != nil { - glog.V(0).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) + log.V(3).Infof("Can not stat file: %v and %v", dbStatErr, indexStatErr) return false } diff --git a/weed/storage/store.go b/weed/storage/store.go index 3b2869a2e..b849cbf36 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -14,7 +14,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" @@ -166,11 +166,11 @@ func (s *Store) addVolume(vid needle.VolumeId, collection string, needleMapKind if location := s.FindFreeLocation(func(location *DiskLocation) bool { return location.DiskType == diskType }); location != nil { - glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", + log.V(3).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) if volume, err := NewVolume(location.Directory, location.IdxDirectory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate, memoryMapMaxSizeMb, ldbTimeout); err == nil { location.SetVolume(vid, volume) - glog.V(0).Infof("add volume %d", vid) + log.V(3).Infof("add volume %d", vid) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ Id: uint32(vid), Collection: collection, @@ -276,12 +276,12 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { deleteVids = append(deleteVids, v.Id) shouldDeleteVolume = true } else { - glog.V(0).Infof("volume %d is expired", v.Id) + log.V(3).Infof("volume %d is expired", v.Id) } if v.lastIoError != nil { deleteVids = append(deleteVids, v.Id) shouldDeleteVolume = true - glog.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) + log.Warningf("volume %d has IO error: %v", v.Id, v.lastIoError) } } @@ -329,10 +329,10 @@ func (s *Store) CollectHeartbeat() *master_pb.Heartbeat { found, err := location.deleteVolumeById(vid, false) if err == nil { if found { - glog.V(0).Infof("volume %d is deleted", vid) + log.V(3).Infof("volume %d is deleted", vid) } } else { - glog.Warningf("delete 
volume %d: %v", vid, err) + log.Warningf("delete volume %d: %v", vid, err) } } location.volumesLock.Unlock() @@ -401,7 +401,7 @@ func (s *Store) deleteExpiredEcVolumes() (ecShards, deleted []*master_pb.VolumeE err := location.deleteEcVolumeById(ev.VolumeId) if err != nil { ecShards = append(ecShards, messages...) - glog.Errorf("delete EcVolume err %d: %v", ev.VolumeId, err) + log.Errorf("delete EcVolume err %d: %v", ev.VolumeId, err) continue } // No need for additional lock here since we only need the messages @@ -440,7 +440,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, checkCook _, _, isUnchanged, err = v.writeNeedle2(n, checkCookie, fsync || s.isStopping) return } - glog.V(0).Infoln("volume", i, "not found!") + log.V(3).Infoln("volume", i, "not found!") err = fmt.Errorf("volume %d not found on %s:%d", i, s.Ip, s.Port) return } @@ -513,7 +513,7 @@ func (s *Store) MarkVolumeWritable(i needle.VolumeId) error { func (s *Store) MountVolume(i needle.VolumeId) error { for _, location := range s.Locations { if found := location.LoadVolume(i, s.NeedleMapKind); found == true { - glog.V(0).Infof("mount volume %d", i) + log.V(3).Infof("mount volume %d", i) v := s.findVolume(i) s.NewVolumesChan <- master_pb.VolumeShortInformationMessage{ Id: uint32(v.Id), @@ -547,7 +547,7 @@ func (s *Store) UnmountVolume(i needle.VolumeId) error { for _, location := range s.Locations { err := location.UnloadVolume(i) if err == nil { - glog.V(0).Infof("UnmountVolume %d", i) + log.V(3).Infof("UnmountVolume %d", i) s.DeletedVolumesChan <- message return nil } else if err == ErrVolumeNotFound { @@ -574,7 +574,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error { for _, location := range s.Locations { err := location.DeleteVolume(i, onlyEmpty) if err == nil { - glog.V(0).Infof("DeleteVolume %d", i) + log.V(3).Infof("DeleteVolume %d", i) s.DeletedVolumesChan <- message return nil } else if err == ErrVolumeNotFound { @@ -582,7 +582,7 @@ func (s *Store) DeleteVolume(i needle.VolumeId, onlyEmpty bool) error { } else if err == ErrVolumeNotEmpty { return fmt.Errorf("DeleteVolume %d: %v", i, err) } else { - glog.Errorf("DeleteVolume %d: %v", i, err) + log.Errorf("DeleteVolume %d: %v", i, err) } } @@ -654,7 +654,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) { } newMaxVolumeCount = newMaxVolumeCount + maxVolumeCount atomic.StoreInt32(&diskLocation.MaxVolumeCount, maxVolumeCount) - glog.V(4).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB", + log.V(-1).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB", diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024) hasChanges = hasChanges || currentMaxVolumeCount != atomic.LoadInt32(&diskLocation.MaxVolumeCount) } else { diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 38cf41550..f2ad46463 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -11,7 +11,7 @@ import ( "github.com/klauspost/reedsolomon" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" @@ -51,7 +51,7 @@ func (s *Store) CollectErasureCodingHeartbeat() *master_pb.Heartbeat { func (s *Store) MountEcShards(collection string, vid needle.VolumeId, shardId erasure_coding.ShardId) error { for _, location := range s.Locations { if 
ecVolume, err := location.LoadEcShard(collection, vid, shardId); err == nil { - glog.V(0).Infof("MountEcShards %d.%d", vid, shardId) + log.V(3).Infof("MountEcShards %d.%d", vid, shardId) var shardBits erasure_coding.ShardBits @@ -90,7 +90,7 @@ func (s *Store) UnmountEcShards(vid needle.VolumeId, shardId erasure_coding.Shar for _, location := range s.Locations { if deleted := location.UnloadEcShard(vid, shardId); deleted { - glog.V(0).Infof("UnmountEcShards %d.%d", vid, shardId) + log.V(3).Infof("UnmountEcShards %d.%d", vid, shardId) s.DeletedEcShardsChan <- message return nil } @@ -150,10 +150,10 @@ func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle, onReadS onReadSizeFn(size) } - glog.V(3).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) + log.V(0).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToActualOffset(), size, intervals) if len(intervals) > 1 { - glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) + log.V(0).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) } bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals) if err != nil { @@ -204,7 +204,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur var readSize int if readSize, err = shard.ReadAt(data, actualOffset); err != nil { if readSize != int(interval.Size) { - glog.V(0).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err) + log.V(3).Infof("read local ec shard %d.%d offset %d: %v", ecVolume.VolumeId, shardId, actualOffset, err) return } } @@ -219,7 +219,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur if err == nil { return } - glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) + log.V(3).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err) } // try reading by recovering from other shards @@ -227,7 +227,7 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur if err == nil { return } - glog.V(0).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err) + log.V(3).Infof("recover ec shard %d.%d : %v", ecVolume.VolumeId, shardId, err) } return } @@ -252,7 +252,7 @@ func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) return nil } - glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) + log.V(0).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) err = operation.WithMasterServerClient(false, s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupEcVolumeRequest{ @@ -289,12 +289,12 @@ func (s *Store) readRemoteEcShardInterval(sourceDataNodes []pb.ServerAddress, ne } for _, sourceDataNode := range sourceDataNodes { - glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) + log.V(0).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return } - glog.V(1).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) + log.V(2).Infof("read remote ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err) } return @@ -341,7 +341,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode pb.ServerAddress, nee } 
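The recovery path that follows rebuilds a missing shard from whichever other shards are still reachable, via reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) and enc.ReconstructData(bufs). A condensed sketch of that step against the same github.com/klauspost/reedsolomon API; SeaweedFS's default erasure-coding layout is 10 data + 4 parity shards:

package main

import "github.com/klauspost/reedsolomon"

// recoverDataShards sketches the ReconstructData call used below: shards
// holds dataShards+parityShards buffers, with nil marking a shard that
// could not be read. Any dataShards survivors are enough to rebuild the
// missing data shards in place; missing parity shards are left nil.
func recoverDataShards(shards [][]byte, dataShards, parityShards int) error {
	enc, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		return err
	}
	return enc.ReconstructData(shards)
}
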
func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { - glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) + log.V(0).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) if err != nil { @@ -359,7 +359,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum continue } if len(locations) == 0 { - glog.V(3).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations) + log.V(0).Infof("readRemoteEcShardInterval missing %d.%d from %+v", ecVolume.VolumeId, shardId, locations) continue } @@ -370,7 +370,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum data := make([]byte, len(buf)) nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset) if readErr != nil { - glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) + log.V(0).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) forgetShardId(ecVolume, shardId) } if isDeleted { @@ -386,10 +386,10 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum wg.Wait() if err = enc.ReconstructData(bufs); err != nil { - glog.V(3).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err) + log.V(0).Infof("recovered ec shard %d.%d failed: %v", ecVolume.VolumeId, shardIdToRecover, err) return 0, false, err } - glog.V(4).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) + log.V(-1).Infof("recovered ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) copy(buf, bufs[shardIdToRecover]) diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index a3e028bbb..708c2a636 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/seaweedfs/seaweedfs/weed/pb" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" @@ -76,12 +76,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.Sh } for _, sourceDataNode := range sourceDataNodes { - glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) + log.V(-1).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) if err != nil { return err } - glog.V(1).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err) + log.V(2).Infof("delete from remote ec shard %d.%d from %s: %v", ecVolume.VolumeId, shardId, sourceDataNode, err) } return nil diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index 531d859b8..6cf8eb002 100644 --- a/weed/storage/store_vacuum.go +++ 
b/weed/storage/store_vacuum.go @@ -5,13 +5,13 @@ import ( "github.com/seaweedfs/seaweedfs/weed/stats" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { if v := s.findVolume(volumeId); v != nil { - glog.V(3).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel()) + log.V(0).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel()) return v.garbageLevel(), nil } return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId) diff --git a/weed/storage/super_block/super_block.go b/weed/storage/super_block/super_block.go index d2ef09e6a..f92f92fc2 100644 --- a/weed/storage/super_block/super_block.go +++ b/weed/storage/super_block/super_block.go @@ -3,7 +3,7 @@ package super_block import ( "google.golang.org/protobuf/proto" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/util" @@ -48,12 +48,12 @@ func (s *SuperBlock) Bytes() []byte { if s.Extra != nil { extraData, err := proto.Marshal(s.Extra) if err != nil { - glog.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) + log.Fatalf("cannot marshal super block extra %+v: %v", s.Extra, err) } extraSize := len(extraData) if extraSize > 256*256-2 { // reserve a couple of bits for future extension - glog.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) + log.Fatalf("super block extra size is %d bigger than %d", extraSize, 256*256-2) } s.ExtraSize = uint16(extraSize) util.Uint16toBytes(header[6:8], s.ExtraSize) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index e55564652..46cafe098 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -15,7 +15,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/super_block" "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type Volume struct { @@ -121,7 +121,7 @@ func (v *Volume) FileStat() (datSize uint64, idxSize uint64, modTime time.Time) if e == nil { return uint64(datFileSize), v.nm.IndexFileSize(), modTime } - glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) + log.V(3).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) return // -1 causes integer overflow and the volume to become unwritable. 
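The inline comment just above ("-1 causes integer overflow and the volume to become unwritable") is worth a concrete illustration: FileStat returns datSize as a uint64, so letting a failed stat's -1 flow into that return value would wrap around to 2^64-1 and make the volume look impossibly full, which is why the function returns early instead. A two-line demonstration of the wraparound:

package main

import "fmt"

func main() {
	// A stat failure reported as -1 wraps when stored in an unsigned
	// size field, so the volume would appear to be over capacity.
	fmt.Println(uint64(int64(-1))) // 18446744073709551615
}
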
} @@ -141,7 +141,7 @@ func (v *Volume) doIsEmpty() (bool, error) { } else { datFileSize, _, e := v.DataBackend.GetStat() if e != nil { - glog.V(0).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) + log.V(3).Infof("Failed to read file size %s %v", v.DataBackend.Name(), e) return false, fmt.Errorf("v.DataBackend.GetStat(): %v", e) } if datFileSize > super_block.SuperBlockSize { @@ -211,12 +211,12 @@ func (v *Volume) SyncToDisk() { defer v.dataFileAccessLock.Unlock() if v.nm != nil { if err := v.nm.Sync(); err != nil { - glog.Warningf("Volume Close fail to sync volume idx %d", v.Id) + log.Warningf("Volume Close fail to sync volume idx %d", v.Id) } } if v.DataBackend != nil { if err := v.DataBackend.Sync(); err != nil { - glog.Warningf("Volume Close fail to sync volume %d", v.Id) + log.Warningf("Volume Close fail to sync volume %d", v.Id) } } } @@ -232,19 +232,19 @@ func (v *Volume) Close() { func (v *Volume) doClose() { for v.isCommitCompacting { time.Sleep(521 * time.Millisecond) - glog.Warningf("Volume Close wait for compaction %d", v.Id) + log.Warningf("Volume Close wait for compaction %d", v.Id) } if v.nm != nil { if err := v.nm.Sync(); err != nil { - glog.Warningf("Volume Close fail to sync volume idx %d", v.Id) + log.Warningf("Volume Close fail to sync volume idx %d", v.Id) } v.nm.Close() v.nm = nil } if v.DataBackend != nil { if err := v.DataBackend.Close(); err != nil { - glog.Warningf("Volume Close fail to sync volume %d", v.Id) + log.Warningf("Volume Close fail to sync volume %d", v.Id) } v.DataBackend = nil stats.VolumeServerVolumeGauge.WithLabelValues(v.Collection, "volume").Dec() @@ -270,9 +270,9 @@ func (v *Volume) expired(contentSize uint64, volumeSizeLimit uint64) bool { if v.Ttl == nil || v.Ttl.Minutes() == 0 { return false } - glog.V(2).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds) + log.V(1).Infof("volume %d now:%v lastModified:%v", v.Id, time.Now().Unix(), v.lastModifiedTsSeconds) livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60 - glog.V(2).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes) + log.V(1).Infof("volume %d ttl:%v lived:%v", v.Id, v.Ttl, livedMinutes) if int64(v.Ttl.Minutes()) < livedMinutes { return true } @@ -298,7 +298,7 @@ func (v *Volume) expiredLongEnough(maxDelayMinutes uint32) bool { func (v *Volume) collectStatus() (maxFileKey types.NeedleId, datFileSize int64, modTime time.Time, fileCount, deletedCount, deletedSize uint64, ok bool) { v.dataFileAccessLock.RLock() defer v.dataFileAccessLock.RUnlock() - glog.V(4).Infof("collectStatus volume %d", v.Id) + log.V(-1).Infof("collectStatus volume %d", v.Id) if v.nm == nil || v.DataBackend == nil { return diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index 6d2335f70..d69234db0 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -6,7 +6,7 @@ import ( "io" "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/idx" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -122,10 +122,10 @@ func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, return n.AppendAtNs, nil } if fileSize > fileTailOffset { - glog.Warningf("data file %s actual %d bytes expected %d bytes!", datFile.Name(), fileSize, fileTailOffset) + log.Warningf("data file %s actual %d bytes expected %d bytes!", datFile.Name(), 
fileSize, fileTailOffset) return n.AppendAtNs, fmt.Errorf("data file %s actual %d bytes expected %d bytes", datFile.Name(), fileSize, fileTailOffset) } - glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset) + log.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset) } if err = n.ReadData(datFile, offset, size, v); err != nil { return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err) diff --git a/weed/storage/volume_info/volume_info.go b/weed/storage/volume_info/volume_info.go index 24e2b17bc..008e47f49 100644 --- a/weed/storage/volume_info/volume_info.go +++ b/weed/storage/volume_info/volume_info.go @@ -5,7 +5,7 @@ import ( jsonpb "google.golang.org/protobuf/encoding/protojson" "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" _ "github.com/seaweedfs/seaweedfs/weed/storage/backend/rclone_backend" _ "github.com/seaweedfs/seaweedfs/weed/storage/backend/s3_backend" @@ -17,14 +17,14 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn volumeInfo = &volume_server_pb.VolumeInfo{} - glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) + log.V(2).Infof("maybeLoadVolumeInfo checks %s", fileName) if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { if !exists { return } hasVolumeInfoFile = true if !canRead { - glog.Warningf("can not read %s", fileName) + log.Warningf("can not read %s", fileName) err = fmt.Errorf("can not read %s", fileName) return } @@ -33,19 +33,19 @@ func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeIn hasVolumeInfoFile = true - glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) + log.V(2).Infof("maybeLoadVolumeInfo reads %s", fileName) fileData, readErr := os.ReadFile(fileName) if readErr != nil { - glog.Warningf("fail to read %s : %v", fileName, readErr) + log.Warningf("fail to read %s : %v", fileName, readErr) err = fmt.Errorf("fail to read %s : %v", fileName, readErr) return } - glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + log.V(2).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) if err = jsonpb.Unmarshal(fileData, volumeInfo); err != nil { if oldVersionErr := tryOldVersionVolumeInfo(fileData, volumeInfo); oldVersionErr != nil { - glog.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr) + log.Warningf("unmarshal error: %v oldFormat: %v", err, oldVersionErr) err = fmt.Errorf("unmarshal error: %v oldFormat: %v", err, oldVersionErr) return } else { diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index 3334159ed..ac4122957 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -8,7 +8,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -51,7 +51,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind if v.HasRemoteFile() { v.noWriteCanDelete = true v.noWriteOrDelete = false - glog.V(0).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo) + log.V(3).Infof("loading volume %d from remote %v", v.Id, v.volumeInfo) if err := 
v.LoadRemoteFile(); err != nil { return fmt.Errorf("load remote file %v: %v", v.volumeInfo, err) } @@ -65,7 +65,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind if canWrite { dataFile, err = os.OpenFile(v.FileName(".dat"), os.O_RDWR|os.O_CREATE, 0644) } else { - glog.V(0).Infof("opening %s in READONLY mode", v.FileName(".dat")) + log.V(3).Infof("opening %s in READONLY mode", v.FileName(".dat")) dataFile, err = os.Open(v.FileName(".dat")) v.noWriteOrDelete = true } @@ -95,10 +95,10 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind if err == nil { v.volumeInfo.Version = uint32(v.SuperBlock.Version) } - glog.V(0).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version) + log.V(3).Infof("readSuperBlock volume %d version %v", v.Id, v.SuperBlock.Version) if v.HasRemoteFile() { // maybe temporary network problem - glog.Errorf("readSuperBlock remote volume %d: %v", v.Id, err) + log.Errorf("readSuperBlock remote volume %d: %v", v.Id, err) err = nil } } else { @@ -116,16 +116,16 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind } // check volume idx files if err := v.checkIdxFile(); err != nil { - glog.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err) + log.Fatalf("check volume idx file %s: %v", v.FileName(".idx"), err) } var indexFile *os.File if v.noWriteOrDelete { - glog.V(0).Infoln("open to read file", v.FileName(".idx")) + log.V(3).Infoln("open to read file", v.FileName(".idx")) if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDONLY, 0644); err != nil { return fmt.Errorf("cannot read Volume Index %s: %v", v.FileName(".idx"), err) } } else { - glog.V(1).Infoln("open to write file", v.FileName(".idx")) + log.V(2).Infoln("open to write file", v.FileName(".idx")) if indexFile, err = os.OpenFile(v.FileName(".idx"), os.O_RDWR|os.O_CREATE, 0644); err != nil { return fmt.Errorf("cannot write Volume Index %s: %v", v.FileName(".idx"), err) } @@ -136,27 +136,27 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind // storage tier, and download to local storage, which may cause the // capactiy overloading. 
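Stepping back from this file for a moment: the V-level renumbering applied throughout these hunks is consistent — glog.V(0) becomes log.V(3), V(1) becomes V(2), V(2) becomes V(1), V(3) becomes V(0), and V(4) becomes V(-1) — so a higher argument now means a more important message, in line with zap-style severities where -1 is debug. The new weed/util/log package itself is not part of this diff, so the following is only a sketch of what a zap-backed shim honoring that contract could look like; the names, construction, and signatures here are assumptions, not the package's actual API:

package log

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// sugar is an assumed zap backend; the real package may construct and
// configure its logger differently.
var sugar = zap.Must(zap.NewProduction()).Sugar()

// Verbose gates messages on a zap-style severity: V(-1) is debug
// chatter, V(3) the most important, matching the renumbering above.
type Verbose bool

// V reports whether messages at the given severity are enabled.
func V(level int) Verbose {
	return Verbose(sugar.Desugar().Core().Enabled(zapcore.Level(level)))
}

// Infof logs only when the verbosity check passed.
func (v Verbose) Infof(format string, args ...interface{}) {
	if v {
		sugar.Infof(format, args...)
	}
}

Under this sketch the ungated helpers seen in the hunks (Errorf, Warningf, Fatalf, Exitf) would delegate straight to the sugared logger.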
if !v.HasRemoteFile() { - glog.V(0).Infof("checking volume data integrity for volume %d", v.Id) + log.V(3).Infof("checking volume data integrity for volume %d", v.Id) if v.lastAppendAtNs, err = CheckVolumeDataIntegrity(v, indexFile); err != nil { v.noWriteOrDelete = true - glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err) + log.V(3).Infof("volumeDataIntegrityChecking failed %v", err) } } if v.noWriteOrDelete || v.noWriteCanDelete { if v.nm, err = NewSortedFileNeedleMap(v.IndexFileName(), indexFile); err != nil { - glog.V(0).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err) + log.V(3).Infof("loading sorted db %s error: %v", v.FileName(".sdx"), err) } } else { switch needleMapKind { case NeedleMapInMemory: if v.tmpNm != nil { - glog.V(0).Infof("updating memory compact index %s ", v.FileName(".idx")) + log.V(3).Infof("updating memory compact index %s ", v.FileName(".idx")) err = v.tmpNm.UpdateNeedleMap(v, indexFile, nil, 0) } else { - glog.V(0).Infoln("loading memory index", v.FileName(".idx"), "to memory") + log.V(3).Infoln("loading memory index", v.FileName(".idx"), "to memory") if v.nm, err = LoadCompactNeedleMap(indexFile); err != nil { - glog.V(0).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err) + log.V(3).Infof("loading index %s to memory error: %v", v.FileName(".idx"), err) } } case NeedleMapLevelDb: @@ -166,12 +166,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind CompactionTableSizeMultiplier: 10, // default value is 1 } if v.tmpNm != nil { - glog.V(0).Infoln("updating leveldb index", v.FileName(".ldb")) + log.V(3).Infoln("updating leveldb index", v.FileName(".ldb")) err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout) } else { - glog.V(0).Infoln("loading leveldb index", v.FileName(".ldb")) + log.V(3).Infoln("loading leveldb index", v.FileName(".ldb")) if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) + log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } } case NeedleMapLevelDbMedium: @@ -181,12 +181,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind CompactionTableSizeMultiplier: 10, // default value is 1 } if v.tmpNm != nil { - glog.V(0).Infoln("updating leveldb medium index", v.FileName(".ldb")) + log.V(3).Infoln("updating leveldb medium index", v.FileName(".ldb")) err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout) } else { - glog.V(0).Infoln("loading leveldb medium index", v.FileName(".ldb")) + log.V(3).Infoln("loading leveldb medium index", v.FileName(".ldb")) if v.nm, err = NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) + log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } } case NeedleMapLevelDbLarge: @@ -196,12 +196,12 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind CompactionTableSizeMultiplier: 10, // default value is 1 } if v.tmpNm != nil { - glog.V(0).Infoln("updating leveldb large index", v.FileName(".ldb")) + log.V(3).Infoln("updating leveldb large index", v.FileName(".ldb")) err = v.tmpNm.UpdateNeedleMap(v, indexFile, opts, v.ldbTimeout) } else { - glog.V(0).Infoln("loading leveldb large index", v.FileName(".ldb")) + log.V(3).Infoln("loading leveldb large index", v.FileName(".ldb")) if v.nm, err = 
NewLevelDbNeedleMap(v.FileName(".ldb"), indexFile, opts, v.ldbTimeout); err != nil { - glog.V(0).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) + log.V(3).Infof("loading leveldb %s error: %v", v.FileName(".ldb"), err) } } } @@ -212,7 +212,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind v.volumeInfo.Version = uint32(v.SuperBlock.Version) v.volumeInfo.BytesOffset = uint32(types.OffsetSize) if err := v.SaveVolumeInfo(); err != nil { - glog.Warningf("volume %d failed to save file info: %v", v.Id, err) + log.Warningf("volume %d failed to save file info: %v", v.Id, err) } } diff --git a/weed/storage/volume_read.go b/weed/storage/volume_read.go index f82e3e72d..964a73ca3 100644 --- a/weed/storage/volume_read.go +++ b/weed/storage/volume_read.go @@ -6,7 +6,7 @@ import ( "io" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -28,7 +28,7 @@ func (v *Volume) readNeedle(n *needle.Needle, readOption *ReadOption, onReadSize readSize := nv.Size if readSize.IsDeleted() { if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize { - glog.V(3).Infof("reading deleted %s", n.String()) + log.V(0).Infof("reading deleted %s", n.String()) readSize = -readSize } else { return -1, ErrorDeleted @@ -118,7 +118,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr readSize := nv.Size if readSize.IsDeleted() { if readOption != nil && readOption.ReadDeleted && readSize != TombstoneFileSize { - glog.V(3).Infof("reading deleted %s", n.String()) + log.V(0).Infof("reading deleted %s", n.String()) readSize = -readSize } else { return ErrorDeleted @@ -253,7 +253,7 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag if volumeFileScanner.ReadNeedleBody() { // println("needle", n.Id.String(), "offset", offset, "size", n.Size, "rest", rest) if needleBody, err = n.ReadNeedleBody(datBackend, version, offset+NeedleHeaderSize, rest); err != nil { - glog.V(0).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err) + log.V(3).Infof("cannot read needle head [%d, %d) body [%d, %d) body length %d: %v", offset, offset+NeedleHeaderSize, offset+NeedleHeaderSize, offset+NeedleHeaderSize+rest, rest, err) // err = fmt.Errorf("cannot read needle body: %v", err) // return } @@ -263,18 +263,18 @@ func ScanVolumeFileFrom(version needle.Version, datBackend backend.BackendStorag return nil } if err != nil { - glog.V(0).Infof("visit needle error: %v", err) + log.V(3).Infof("visit needle error: %v", err) return fmt.Errorf("visit needle error: %v", err) } offset += NeedleHeaderSize + rest - glog.V(4).Infof("==> new entry offset %d", offset) + log.V(-1).Infof("==> new entry offset %d", offset) if n, nh, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { if err == io.EOF { return nil } return fmt.Errorf("cannot read needle header at offset %d: %v", offset, err) } - glog.V(4).Infof("new entry needle size:%d rest:%d", n.Size, rest) + log.V(-1).Infof("new entry needle size:%d rest:%d", n.Size, rest) } return nil } diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index 096d46906..652f71058 100644 --- a/weed/storage/volume_super_block.go +++ 
b/weed/storage/volume_super_block.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/super_block" @@ -14,7 +14,7 @@ func (v *Volume) maybeWriteSuperBlock() error { datSize, _, e := v.DataBackend.GetStat() if e != nil { - glog.V(0).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e) + log.V(3).Infof("failed to stat datafile %s: %v", v.DataBackend.Name(), e) return e } if datSize == 0 { diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go index 5d9b67192..8502b97ef 100644 --- a/weed/storage/volume_tier.go +++ b/weed/storage/volume_tier.go @@ -2,7 +2,7 @@ package storage import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" "github.com/seaweedfs/seaweedfs/weed/storage/backend" _ "github.com/seaweedfs/seaweedfs/weed/storage/backend/rclone_backend" @@ -27,7 +27,7 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) { } if v.hasRemoteFile { - glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, + log.V(3).Infof("volume %d is tiered to %s as %s and read only", v.Id, v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key) } else { if v.volumeInfo.BytesOffset == 0 { @@ -42,12 +42,12 @@ func (v *Volume) maybeLoadVolumeInfo() (found bool) { } else { m = "with" } - glog.Exitf("BytesOffset mismatch in volume info file %s, try use binary version %s large_disk", v.FileName(".vif"), m) + log.Exitf("BytesOffset mismatch in volume info file %s, try use binary version %s large_disk", v.FileName(".vif"), m) return } if err != nil { - glog.Warningf("load volume %d.vif file: %v", v.Id, err) + log.Warningf("load volume %d.vif file: %v", v.Id, err) return } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 9f277d4f5..cb310781d 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -6,7 +6,7 @@ import ( "runtime" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/backend" idx2 "github.com/seaweedfs/seaweedfs/weed/storage/idx" @@ -41,13 +41,13 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } - glog.V(3).Infof("Compacting volume %d ...", v.Id) + log.V(0).Infof("Compacting volume %d ...", v.Id) //no need to lock for copy on write //v.accessLock.Lock() //defer v.accessLock.Unlock() - //glog.V(3).Infof("Got Compaction lock...") + //log.V(0).Infof("Got Compaction lock...") if v.isCompacting || v.isCommitCompacting { - glog.V(0).Infof("Volume %d is already compacting...", v.Id) + log.V(3).Infof("Volume %d is already compacting...", v.Id) return nil } v.isCompacting = true @@ -57,12 +57,12 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision - glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) + log.V(0).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) if err := v.DataBackend.Sync(); err != nil 
{ - glog.V(0).Infof("compact failed to sync volume %d", v.Id) + log.V(3).Infof("compact failed to sync volume %d", v.Id) } if err := v.nm.Sync(); err != nil { - glog.V(0).Infof("compact failed to sync volume idx %d", v.Id) + log.V(3).Infof("compact failed to sync volume idx %d", v.Id) } return v.copyDataAndGenerateIndexFile(v.FileName(".cpd"), v.FileName(".cpx"), preallocate, compactionBytePerSecond) } @@ -73,10 +73,10 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64, prog if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } - glog.V(3).Infof("Compact2 volume %d ...", v.Id) + log.V(0).Infof("Compact2 volume %d ...", v.Id) if v.isCompacting || v.isCommitCompacting { - glog.V(0).Infof("Volume %d is already compacting2 ...", v.Id) + log.V(3).Infof("Volume %d is already compacting2 ...", v.Id) return nil } v.isCompacting = true @@ -86,15 +86,15 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64, prog v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision - glog.V(3).Infof("creating copies for volume %d ...", v.Id) + log.V(0).Infof("creating copies for volume %d ...", v.Id) if v.DataBackend == nil { return fmt.Errorf("volume %d backend is empty remote:%v", v.Id, v.HasRemoteFile()) } if err := v.DataBackend.Sync(); err != nil { - glog.V(0).Infof("compact2 failed to sync volume dat %d: %v", v.Id, err) + log.V(3).Infof("compact2 failed to sync volume dat %d: %v", v.Id, err) } if err := v.nm.Sync(); err != nil { - glog.V(0).Infof("compact2 failed to sync volume idx %d: %v", v.Id, err) + log.V(3).Infof("compact2 failed to sync volume idx %d: %v", v.Id, err) } return v.copyDataBasedOnIndexFile( v.FileName(".dat"), v.FileName(".idx"), @@ -111,10 +111,10 @@ func (v *Volume) CommitCompact() error { if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil } - glog.V(0).Infof("Committing volume %d vacuuming...", v.Id) + log.V(3).Infof("Committing volume %d vacuuming...", v.Id) if v.isCommitCompacting { - glog.V(0).Infof("Volume %d is already commit compacting ...", v.Id) + log.V(3).Infof("Volume %d is already commit compacting ...", v.Id) return nil } v.isCommitCompacting = true @@ -125,14 +125,14 @@ func (v *Volume) CommitCompact() error { v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() - glog.V(3).Infof("Got volume %d committing lock...", v.Id) + log.V(0).Infof("Got volume %d committing lock...", v.Id) if v.nm != nil { v.nm.Close() v.nm = nil } if v.DataBackend != nil { if err := v.DataBackend.Close(); err != nil { - glog.V(0).Infof("failed to close volume %d", v.Id) + log.V(3).Infof("failed to close volume %d", v.Id) } } v.DataBackend = nil @@ -140,7 +140,7 @@ func (v *Volume) CommitCompact() error { var e error if e = v.makeupDiff(v.FileName(".cpd"), v.FileName(".cpx"), v.FileName(".dat"), v.FileName(".idx")); e != nil { - glog.V(0).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) + log.V(3).Infof("makeupDiff in CommitCompact volume %d failed %v", v.Id, e) e = os.Remove(v.FileName(".cpd")) if e != nil { return e @@ -169,21 +169,21 @@ func (v *Volume) CommitCompact() error { } } - //glog.V(3).Infof("Pretending to be vacuuming...") + //log.V(0).Infof("Pretending to be vacuuming...") //time.Sleep(20 * time.Second) os.RemoveAll(v.FileName(".ldb")) - glog.V(3).Infof("Loading volume %d commit file...", v.Id) + log.V(0).Infof("Loading volume %d commit file...", v.Id) if e = v.load(true, false, v.needleMapKind, 0); e != 
nil { return e } - glog.V(3).Infof("Finish committing volume %d", v.Id) + log.V(0).Infof("Finish committing volume %d", v.Id) return nil } func (v *Volume) cleanupCompact() error { - glog.V(0).Infof("Cleaning up volume %d vacuuming...", v.Id) + log.V(3).Infof("Cleaning up volume %d vacuuming...", v.Id) e1 := os.Remove(v.FileName(".cpd")) e2 := os.Remove(v.FileName(".cpx")) @@ -254,7 +254,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idxOffset, err) } key, offset, size := idx2.IdxFileEntry(IdxEntry) - glog.V(4).Infof("key %d offset %d size %d", key, offset, size) + log.V(-1).Infof("key %d offset %d size %d", key, offset, size) if _, found := incrementedHasUpdatedIndexEntry[key]; !found { incrementedHasUpdatedIndexEntry[key] = keyField{ offset: offset, @@ -308,21 +308,21 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI var offset int64 if offset, err = dst.Seek(0, 2); err != nil { - glog.V(0).Infof("failed to seek the end of file: %v", err) + log.V(3).Infof("failed to seek the end of file: %v", err) return } //ensure file writing starting from aligned positions if offset%NeedlePaddingSize != 0 { offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize) if offset, err = dst.Seek(offset, 0); err != nil { - glog.V(0).Infof("failed to align in datafile %s: %v", dst.Name(), err) + log.V(3).Infof("failed to align in datafile %s: %v", dst.Name(), err) return } } //updated needle if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size.IsValid() { //even the needle cache in memory is hit, the need_bytes is correct - glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size) + log.V(-1).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size) var needleBytes []byte needleBytes, err = needle.ReadNeedleBlob(oldDatBackend, increIdxEntry.offset.ToActualOffset(), increIdxEntry.size, v.Version()) if err != nil { @@ -386,7 +386,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in return nil } nv, ok := scanner.v.nm.Get(n.Id) - glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) + log.V(-1).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) if ok && nv.Offset.ToActualOffset() == offset && nv.Size > 0 && nv.Size.IsValid() { if err := scanner.nm.Set(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) @@ -397,7 +397,7 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in delta := n.DiskSize(scanner.version) scanner.newOffset += delta scanner.writeThrottler.MaybeSlowdown(delta) - glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size) + log.V(-1).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size) } return nil } @@ -492,7 +492,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, da delta := n.DiskSize(version) newOffset += delta writeThrottler.MaybeSlowdown(delta) - glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + log.V(-1).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) return nil }) @@ -511,7 +511,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, 
dstDatName, da v.Id.String(), dstDatSize, expectedContentSize) } } else { - glog.Warningf("volume %s content size: %d less deleted size: %d, new size: %d", + log.Warningf("volume %s content size: %d less deleted size: %d, new size: %d", v.Id.String(), v.nm.ContentSize(), v.nm.DeletedSize(), dstDatSize) } } @@ -522,7 +522,7 @@ func (v *Volume) copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, da indexFile, err := os.OpenFile(datIdxName, os.O_RDWR|os.O_CREATE, 0644) if err != nil { - glog.Errorf("cannot open Volume Index %s: %v", datIdxName, err) + log.Errorf("cannot open Volume Index %s: %v", datIdxName, err) return err } defer func() { diff --git a/weed/storage/volume_write.go b/weed/storage/volume_write.go index cf959b576..b84bbd7bc 100644 --- a/weed/storage/volume_write.go +++ b/weed/storage/volume_write.go @@ -6,7 +6,7 @@ import ( "fmt" "os" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/needle" . "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -40,7 +40,7 @@ func (v *Volume) isFileUnchanged(n *needle.Needle) bool { oldNeedle := new(needle.Needle) err := oldNeedle.ReadData(v.DataBackend, nv.Offset.ToActualOffset(), nv.Size, v.Version()) if err != nil { - glog.V(0).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err) + log.V(3).Infof("Failed to check updated file at offset %d size %d: %v", nv.Offset.ToActualOffset(), nv.Size, err) return false } if oldNeedle.Cookie == n.Cookie && oldNeedle.Checksum == n.Checksum && bytes.Equal(oldNeedle.Data, n.Data) { @@ -107,7 +107,7 @@ func (v *Volume) asyncRequestAppend(request *needle.AsyncRequest) { } func (v *Volume) syncWrite(n *needle.Needle, checkCookie bool) (offset uint64, size Size, isUnchanged bool, err error) { - // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + // log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() @@ -115,7 +115,7 @@ func (v *Volume) syncWrite(n *needle.Needle, checkCookie bool) (offset uint64, s } func (v *Volume) writeNeedle2(n *needle.Needle, checkCookie bool, fsync bool) (offset uint64, size Size, isUnchanged bool, err error) { - // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + // log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) if n.Ttl == needle.EMPTY_TTL && v.Ttl != needle.EMPTY_TTL { n.SetHasTtl() n.Ttl = v.Ttl @@ -136,7 +136,7 @@ func (v *Volume) writeNeedle2(n *needle.Needle, checkCookie bool, fsync bool) (o } func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint64, size Size, isUnchanged bool, err error) { - // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + // log.V(-1).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) if v.isFileUnchanged(n) { size = Size(n.DataSize) isUnchanged = true @@ -157,7 +157,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint n.Cookie = existingNeedle.Cookie } if existingNeedle.Cookie != n.Cookie { - glog.V(0).Infof("write cookie mismatch: existing %s, new %s", + log.V(3).Infof("write cookie mismatch: existing %s, new %s", needle.NewFileIdFromNeedle(v.Id, existingNeedle), needle.NewFileIdFromNeedle(v.Id, n)) err = fmt.Errorf("mismatching cookie %x", 
n.Cookie) return @@ -178,7 +178,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint // add to needle map if !ok || uint64(nv.Offset.ToActualOffset()) < offset { if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil { - glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err) + log.V(-1).Infof("failed to save in needle map %d: %v", n.Id, err) } } if v.lastModifiedTsSeconds < n.LastModified { @@ -188,7 +188,7 @@ func (v *Volume) doWriteRequest(n *needle.Needle, checkCookie bool) (offset uint } func (v *Volume) syncDelete(n *needle.Needle) (Size, error) { - // glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + // log.V(-1).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() @@ -217,7 +217,7 @@ func (v *Volume) deleteNeedle2(n *needle.Needle) (Size, error) { } func (v *Volume) doDeleteRequest(n *needle.Needle) (Size, error) { - glog.V(4).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + log.V(-1).Infof("delete needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) nv, ok := v.nm.Get(n.Id) // fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size) if ok && nv.Size.IsValid() { @@ -300,7 +300,7 @@ func (v *Volume) startWorker() { if err := v.DataBackend.Sync(); err != nil { // todo: this may generate dirty data or cause data inconsistent, may be weed need to panic? if te := v.DataBackend.Truncate(end); te != nil { - glog.V(0).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te) + log.V(3).Infof("Failed to truncate %s back to %d with error: %v", v.DataBackend.Name(), end, te) } for i := 0; i < len(currentRequests); i++ { if currentRequests[i].IsSucceed() { @@ -334,7 +334,7 @@ func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size newNeedle := new(needle.Needle) err = newNeedle.ReadBytes(needleBlob, nv.Offset.ToActualOffset(), size, v.Version()) if err == nil && oldNeedle.Cookie == newNeedle.Cookie && oldNeedle.Checksum == newNeedle.Checksum && bytes.Equal(oldNeedle.Data, newNeedle.Data) { - glog.V(0).Infof("needle %v already exists", needleId) + log.V(3).Infof("needle %v already exists", needleId) return nil } } @@ -350,7 +350,7 @@ func (v *Volume) WriteNeedleBlob(needleId NeedleId, needleBlob []byte, size Size // add to needle map if err = v.nm.Put(needleId, ToOffset(int64(offset)), size); err != nil { - glog.V(4).Infof("failed to put in needle map %d: %v", needleId, err) + log.V(-1).Infof("failed to put in needle map %d: %v", needleId, err) } return err diff --git a/weed/topology/cluster_commands.go b/weed/topology/cluster_commands.go index 6432828e8..10737732e 100644 --- a/weed/topology/cluster_commands.go +++ b/weed/topology/cluster_commands.go @@ -5,7 +5,7 @@ import ( "fmt" hashicorpRaft "github.com/hashicorp/raft" "github.com/seaweedfs/raft" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) @@ -29,7 +29,7 @@ func (c *MaxVolumeIdCommand) Apply(server raft.Server) (interface{}, error) { before := topo.GetMaxVolumeId() topo.UpAdjustMaxVolumeId(c.MaxVolumeId) - glog.V(1).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId()) + log.V(2).Infoln("max volume id", before, "==>", topo.GetMaxVolumeId()) return nil, nil } diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index 
3103dc207..0dd52ea96 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -4,7 +4,7 @@ import ( "fmt" "sync/atomic" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage" @@ -78,7 +78,7 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume for _, v := range existingVolumes { vid := v.Id if _, ok := actualVolumeMap[vid]; !ok { - glog.V(0).Infoln("Deleting volume id:", vid) + log.V(3).Infoln("Deleting volume id:", vid) disk := dn.getOrCreateDisk(v.DiskType) disk.DeleteVolumeById(vid) deletedVolumes = append(deletedVolumes, v) diff --git a/weed/topology/node.go b/weed/topology/node.go index aa178b561..898b13e48 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -8,7 +8,7 @@ import ( "sync/atomic" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/stats" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" "github.com/seaweedfs/seaweedfs/weed/storage/needle" @@ -76,7 +76,7 @@ func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, option *VolumeGrowOption } n.RUnlock() if len(candidates) < numberOfNodes { - glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") + log.V(3).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") return nil, nil, errors.New("Not enough data nodes found!") } @@ -247,7 +247,7 @@ func (n *NodeImpl) doLinkChildNode(node Node) { } n.UpAdjustMaxVolumeId(node.GetMaxVolumeId()) node.SetParent(n) - glog.V(0).Infoln(n, "adds child", node.Id()) + log.V(3).Infoln(n, "adds child", node.Id()) } } @@ -261,7 +261,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) { for dt, du := range node.GetDiskUsages().negative().usages { n.UpAdjustDiskUsageDelta(dt, du) } - glog.V(0).Infoln(n, "removes", node.Id()) + log.V(3).Infoln(n, "removes", node.Id()) } } diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index a2be991fa..a2b4cc6c2 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/security" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -34,7 +34,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt // this is the initial request remoteLocations, err = GetWritableRemoteReplications(s, grpcDialOption, volumeId, masterFn) if err != nil { - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return } } @@ -57,7 +57,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt if err != nil { stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorWriteToLocalDisk).Inc() err = fmt.Errorf("failed to write to local disk: %v", err) - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return } } @@ -93,7 +93,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt err := json.Unmarshal(n.Pairs, &tmpMap) if err != nil { stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorUnmarshalPairs).Inc() - glog.V(0).Infoln("Unmarshal pairs error:", err) + log.V(3).Infoln("Unmarshal pairs error:", err) } for k, v := range 
tmpMap { pairMap[needle.PairNamePrefix+k] = v @@ -118,12 +118,12 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt uploader, err := operation.NewUploader() if err != nil { - glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String()) + log.Errorf("replication-UploadData, err:%v, url:%s", err, u.String()) return err } _, err = uploader.UploadData(n.Data, uploadOption) if err != nil { - glog.Errorf("replication-UploadData, err:%v, url:%s", err, u.String()) + log.Errorf("replication-UploadData, err:%v, url:%s", err, u.String()) } return err }) @@ -131,7 +131,7 @@ func ReplicatedWrite(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOpt if err != nil { stats.VolumeServerHandlerCounter.WithLabelValues(stats.ErrorWriteToReplicas).Inc() err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return false, err } } @@ -147,14 +147,14 @@ func ReplicatedDelete(masterFn operation.GetMasterFn, grpcDialOption grpc.DialOp if r.FormValue("type") != "replicate" { remoteLocations, err = GetWritableRemoteReplications(store, grpcDialOption, volumeId, masterFn) if err != nil { - glog.V(0).Infoln(err) + log.V(3).Infoln(err) return } } size, err = store.DeleteVolumeNeedle(volumeId, n) if err != nil { - glog.V(0).Infoln("delete error:", err) + log.V(3).Infoln("delete error:", err) return } diff --git a/weed/topology/topology.go b/weed/topology/topology.go index 750c00ea2..c2a483290 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -17,7 +17,7 @@ import ( hashicorpRaft "github.com/hashicorp/raft" "github.com/seaweedfs/raft" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/sequence" "github.com/seaweedfs/seaweedfs/weed/stats" @@ -143,16 +143,16 @@ func (t *Topology) DoBarrier() bool { return true } - glog.V(0).Infof("raft do barrier") + log.V(3).Infof("raft do barrier") barrier := t.HashicorpRaft.Barrier(2 * time.Minute) if err := barrier.Error(); err != nil { - glog.Errorf("failed to wait for barrier, error %s", err) + log.Errorf("failed to wait for barrier, error %s", err) return false } t.barrierDone = true - glog.V(0).Infof("raft do barrier success") + log.V(3).Infof("raft do barrier success") return true } @@ -326,7 +326,7 @@ func (t *Topology) RegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) { } func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) { - glog.Infof("removing volume info: %+v from %v", v, dn.id) + log.Infof("removing volume info: %+v from %v", v, dn.id) if v.ReplicaPlacement.GetCopyCount() > 1 { stats.MasterReplicaPlacementMismatch.WithLabelValues(v.Collection, v.Id.String()).Set(0) } @@ -397,7 +397,7 @@ func (t *Topology) SyncDataNodeRegistration(volumes []*master_pb.VolumeInformati if vi, err := storage.NewVolumeInfo(v); err == nil { volumeInfos = append(volumeInfos, vi) } else { - glog.V(0).Infof("Fail to convert joined volume information: %v", err) + log.V(3).Infof("Fail to convert joined volume information: %v", err) } } // find out the delta volumes @@ -422,7 +422,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume for _, v := range newVolumes { vi, err := storage.NewVolumeInfoFromShort(v) if err != nil { - glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err) + log.V(3).Infof("NewVolumeInfoFromShort %v: %v", v, err) continue } newVis = 
append(newVis, vi) @@ -430,7 +430,7 @@ func (t *Topology) IncrementalSyncDataNodeRegistration(newVolumes, deletedVolume for _, v := range deletedVolumes { vi, err := storage.NewVolumeInfoFromShort(v) if err != nil { - glog.V(0).Infof("NewVolumeInfoFromShort %v: %v", v, err) + log.V(3).Infof("NewVolumeInfoFromShort %v: %v", v, err) continue } oldVis = append(oldVis, vi) @@ -455,15 +455,15 @@ func (t *Topology) DataNodeRegistration(dcName, rackName string, dn *DataNode) { dc := t.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) rack.LinkChildNode(dn) - glog.Infof("[%s] reLink To topo ", dn.Id()) + log.Infof("[%s] reLink To topo ", dn.Id()) } func (t *Topology) DisableVacuum() { - glog.V(0).Infof("DisableVacuum") + log.V(3).Infof("DisableVacuum") t.isDisableVacuum = true } func (t *Topology) EnableVacuum() { - glog.V(0).Infof("EnableVacuum") + log.V(3).Infof("EnableVacuum") t.isDisableVacuum = false } diff --git a/weed/topology/topology_ec.go b/weed/topology/topology_ec.go index 53762b49a..9339998d8 100644 --- a/weed/topology/topology_ec.go +++ b/weed/topology/topology_ec.go @@ -1,7 +1,7 @@ package topology import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/pb" "github.com/seaweedfs/seaweedfs/weed/pb/master_pb" "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding" @@ -115,7 +115,7 @@ func (t *Topology) RegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, d } func (t *Topology) UnRegisterEcShards(ecShardInfos *erasure_coding.EcVolumeInfo, dn *DataNode) { - glog.Infof("removing ec shard info:%+v", ecShardInfos) + log.Infof("removing ec shard info:%+v", ecShardInfos) t.ecShardMapLock.Lock() defer t.ecShardMapLock.Unlock() diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index e3ad8f2dc..e5de2321e 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -9,7 +9,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/types" "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" ) @@ -83,7 +83,7 @@ func (t *Topology) SetVolumeCrowded(volumeInfo storage.VolumeInfo) { func (t *Topology) UnRegisterDataNode(dn *DataNode) { dn.IsTerminating = true for _, v := range dn.GetVolumes() { - glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id()) + log.V(3).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id()) diskType := types.ToDiskType(v.DiskType) vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl, diskType) vl.SetVolumeUnavailable(dn, v.Id) diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 83be65d7c..8671a8945 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -15,7 +15,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/needle" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/operation" "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb" ) @@ -43,7 +43,7 @@ func (t *Topology) batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vid ne return nil }) if err != nil { - glog.V(0).Infof("Checking vacuuming %d on %s: %v", vid, url, err) + log.V(3).Infof("Checking vacuuming %d on %s: %v", vid, url, err) } }(index, dn.ServerAddress(), vid) } @@ -74,7 +74,7 @@ func (t 
*Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl * ch := make(chan bool, locationlist.Length()) for index, dn := range locationlist.list { go func(index int, url pb.ServerAddress, vid needle.VolumeId) { - glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) + log.V(3).Infoln(index, "Start vacuuming", vid, "on", url) err := operation.WithVolumeServerClient(true, url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { stream, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ VolumeId: uint32(vid), @@ -93,16 +93,16 @@ func (t *Topology) batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl * return recvErr } } - glog.V(0).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%", + log.V(3).Infof("%d vacuum %d on %s processed %d bytes, loadAvg %.02f%%", index, vid, url, resp.ProcessedBytes, resp.LoadAvg_1M*100) } return nil }) if err != nil { - glog.Errorf("Error when vacuuming %d on %s: %v", vid, url, err) + log.Errorf("Error when vacuuming %d on %s: %v", vid, url, err) ch <- false } else { - glog.V(0).Infof("Complete vacuuming %d on %s", vid, url) + log.V(3).Infof("Complete vacuuming %d on %s", vid, url) ch <- true } }(index, dn.ServerAddress(), vid) @@ -128,7 +128,7 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V isReadOnly := false isFullCapacity := false for _, dn := range vacuumLocationList.list { - glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) + log.V(3).Infoln("Start Committing vacuum", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), @@ -144,10 +144,10 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V return err }) if err != nil { - glog.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err) + log.Errorf("Error when committing vacuum %d on %s: %v", vid, dn.Url(), err) isCommitSuccess = false } else { - glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url()) + log.V(3).Infof("Complete Committing vacuum %d on %s", vid, dn.Url()) } } @@ -177,7 +177,7 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V return err }) if err != nil { - glog.Errorf("Error when checking volume %d status on %s: %v", vid, dn.Url(), err) + log.Errorf("Error when checking volume %d status on %s: %v", vid, dn.Url(), err) //we mark volume read-only, since the volume state is unknown isReadOnly = true } @@ -201,7 +201,7 @@ func (t *Topology) batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *V func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { - glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) + log.V(3).Infoln("Start cleaning up", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(false, dn.ServerAddress(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ VolumeId: uint32(vid), @@ -209,9 +209,9 @@ func (t *Topology) batchVacuumVolumeCleanup(grpcDialOption 
grpc.DialOption, vl * return err }) if err != nil { - glog.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err) + log.Errorf("Error when cleaning up vacuum %d on %s: %v", vid, dn.Url(), err) } else { - glog.V(0).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url()) + log.V(3).Infof("Complete cleaning up vacuum %d on %s", vid, dn.Url()) } } } @@ -221,14 +221,14 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float // if there is vacuum going on, return immediately swapped := atomic.CompareAndSwapInt64(&t.vacuumLockCounter, 0, 1) if !swapped { - glog.V(0).Infof("Vacuum is already running") + log.V(3).Infof("Vacuum is already running") return } defer atomic.StoreInt64(&t.vacuumLockCounter, 0) // now only one vacuum process going on - glog.V(1).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d", + log.V(2).Infof("Start vacuum on demand with threshold: %f collection: %s volumeId: %d", garbageThreshold, collection, volumeId) for _, col := range t.collectionMap.Items() { c := col.(*Collection) @@ -255,7 +255,7 @@ func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float } } if automatic && t.isDisableVacuum { - glog.V(0).Infof("Vacuum is disabled") + log.V(3).Infof("Vacuum is disabled") break } } @@ -348,11 +348,11 @@ func (t *Topology) vacuumOneVolumeId(grpcDialOption grpc.DialOption, volumeLayou return } if !isEnoughCopies { - glog.Warningf("skip vacuuming: not enough copies for volume:%d", vid) + log.Warningf("skip vacuuming: not enough copies for volume:%d", vid) return } - glog.V(1).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) + log.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) if vacuumLocationList, needVacuum := t.batchVacuumVolumeCheck( grpcDialOption, vid, locationList, garbageThreshold); needVacuum { if t.batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, vacuumLocationList, preallocate) { diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 23e1d5fd6..c028b6236 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -12,7 +12,7 @@ import ( "google.golang.org/grpc" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/super_block" @@ -115,7 +115,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe if res, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil { result = append(result, res...) 
} else { - glog.V(0).Infof("create %d volume, created %d: %v", targetCount, len(result), e) + log.V(3).Infof("create %d volume, created %d: %v", targetCount, len(result), e) return result, e } } @@ -128,7 +128,7 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo return nil, e } for !topo.LastLeaderChangeTime.Add(constants.VolumePulseSeconds * 2).Before(time.Now()) { - glog.V(0).Infof("wait for volume servers to join back") + log.V(3).Infof("wait for volume servers to join back") time.Sleep(constants.VolumePulseSeconds / 2) } vid, raftErr := topo.NextVolumeId() @@ -266,9 +266,9 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid DiskType: option.DiskType.String(), ModifiedAtSecond: time.Now().Unix(), }) - glog.V(0).Infof("Created Volume %d on %s", vid, server.NodeImpl.String()) + log.V(3).Infof("Created Volume %d on %s", vid, server.NodeImpl.String()) } else { - glog.Warningf("Failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err) + log.Warningf("Failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err) growErr = fmt.Errorf("failed to assign volume %d on %s: %v", vid, server.NodeImpl.String(), err) break } @@ -279,14 +279,14 @@ func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid server := servers[i] server.AddOrUpdateVolume(vi) topo.RegisterVolumeLayout(vi, server) - glog.V(0).Infof("Registered Volume %d on %s", vid, server.NodeImpl.String()) + log.V(3).Infof("Registered Volume %d on %s", vid, server.NodeImpl.String()) } } else { // cleaning up created volume replicas for i, vi := range createdVolumes { server := servers[i] if err := DeleteVolume(server, grpcDialOption, vi.Id); err != nil { - glog.Warningf("Failed to clean up volume %d on %s", vid, server.NodeImpl.String()) + log.Warningf("Failed to clean up volume %d on %s", vid, server.NodeImpl.String()) } } } diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 852798c19..b82fefe88 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -11,7 +11,7 @@ import ( "github.com/seaweedfs/seaweedfs/weed/storage/types" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/needle" "github.com/seaweedfs/seaweedfs/weed/storage/super_block" @@ -159,11 +159,11 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { vl.vid2location[v.Id] = NewVolumeLocationList() } vl.vid2location[v.Id].Set(dn) - // glog.V(4).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount()) + // log.V(-1).Infof("volume %d added to %s len %d copy %d", v.Id, dn.Id(), vl.vid2location[v.Id].Length(), v.ReplicaPlacement.GetCopyCount()) for _, dn := range vl.vid2location[v.Id].list { if vInfo, err := dn.GetVolumesById(v.Id); err == nil { if vInfo.ReadOnly { - glog.V(1).Infof("vid %d removed from writable", v.Id) + log.V(2).Infof("vid %d removed from writable", v.Id) vl.removeFromWritable(v.Id) vl.readonlyVolumes.Add(v.Id, dn) return @@ -171,7 +171,7 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { vl.readonlyVolumes.Remove(v.Id, dn) } } else { - glog.V(1).Infof("vid %d removed from writable", v.Id) + log.V(2).Infof("vid %d removed from writable", v.Id) vl.removeFromWritable(v.Id) vl.readonlyVolumes.Remove(v.Id, dn) return @@ -226,15 +226,15 @@ 
func (vl *VolumeLayout) ensureCorrectWritables(vid needle.VolumeId) { vl.setVolumeWritable(vid) } else { if !isEnoughCopies { - glog.V(0).Infof("volume %d does not have enough copies", vid) + log.V(3).Infof("volume %d does not have enough copies", vid) } if !isAllWritable { - glog.V(0).Infof("volume %d are not all writable", vid) + log.V(3).Infof("volume %d are not all writable", vid) } if isOversizedVolume { - glog.V(1).Infof("volume %d are oversized", vid) + log.V(2).Infof("volume %d are oversized", vid) } - glog.V(0).Infof("volume %d remove from writable", vid) + log.V(3).Infof("volume %d remove from writable", vid) vl.removeFromWritable(vid) } } @@ -402,7 +402,7 @@ func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool { } vl.removeFromCrowded(vid) if toDeleteIndex >= 0 { - glog.V(0).Infoln("Volume", vid, "becomes unwritable") + log.V(3).Infoln("Volume", vid, "becomes unwritable") vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...) return true } @@ -414,7 +414,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid needle.VolumeId) bool { return false } } - glog.V(0).Infoln("Volume", vid, "becomes writable") + log.V(3).Infoln("Volume", vid, "becomes writable") vl.writables = append(vl.writables, vid) return true } @@ -453,7 +453,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) vl.readonlyVolumes.Remove(vid, dn) vl.oversizedVolumes.Remove(vid, dn) if location.Length() < vl.rp.GetCopyCount() { - glog.V(0).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount()) + log.V(3).Infoln("Volume", vid, "has", location.Length(), "replica, less than required", vl.rp.GetCopyCount()) return vl.removeFromWritable(vid) } } @@ -493,14 +493,14 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool { wasWritable := vl.removeFromWritable(vid) if wasWritable { - glog.V(0).Infof("Volume %d reaches full capacity.", vid) + log.V(3).Infof("Volume %d reaches full capacity.", vid) } return wasWritable } func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) { if _, ok := vl.crowded[vid]; ok { - glog.V(0).Infoln("Volume", vid, "becomes uncrowded") + log.V(3).Infoln("Volume", vid, "becomes uncrowded") delete(vl.crowded, vid) } } @@ -508,7 +508,7 @@ func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) { func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) { if _, ok := vl.crowded[vid]; !ok { vl.crowded[vid] = struct{}{} - glog.V(0).Infoln("Volume", vid, "becomes crowded") + log.V(3).Infoln("Volume", vid, "becomes crowded") } } diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go index 7eee41b9b..b0affbcd4 100644 --- a/weed/util/chunk_cache/chunk_cache.go +++ b/weed/util/chunk_cache/chunk_cache.go @@ -4,7 +4,7 @@ import ( "errors" "sync" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage/needle" ) @@ -66,13 +66,13 @@ func (c *TieredChunkCache) IsInCache(fileId string, lockNeeded bool) (answer boo item := c.memCache.cache.Get(fileId) if item != nil { - glog.V(4).Infof("fileId %s is in memcache", fileId) + log.V(-1).Infof("fileId %s is in memcache", fileId) return true } fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.V(4).Infof("failed to parse file id %s", fileId) + log.V(-1).Infof("failed to parse file id %s", fileId) return false } @@ -80,7 +80,7 @@ func (c *TieredChunkCache) IsInCache(fileId string, 
lockNeeded bool) (answer boo for k, v := range diskCacheLayer.diskCaches { _, ok := v.nm.Get(fid.Key) if ok { - glog.V(4).Infof("fileId %s is in diskCaches[%d].volume[%d]", fileId, i, k) + log.V(-1).Infof("fileId %s is in diskCaches[%d].volume[%d]", fileId, i, k) return true } } @@ -100,7 +100,7 @@ func (c *TieredChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64 if minSize <= c.onDiskCacheSizeLimit0 { n, err = c.memCache.readChunkAt(data, fileId, offset) if err != nil { - glog.Errorf("failed to read from memcache: %s", err) + log.Errorf("failed to read from memcache: %s", err) } if n == int(len(data)) { return n, nil @@ -109,7 +109,7 @@ func (c *TieredChunkCache) ReadChunkAt(data []byte, fileId string, offset uint64 fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.Errorf("failed to parse file id %s", fileId) + log.Errorf("failed to parse file id %s", fileId) return 0, nil } @@ -143,9 +143,9 @@ func (c *TieredChunkCache) SetChunk(fileId string, data []byte) { c.Lock() defer c.Unlock() - glog.V(4).Infof("SetChunk %s size %d\n", fileId, len(data)) + log.V(-1).Infof("SetChunk %s size %d\n", fileId, len(data)) if c.IsInCache(fileId, false) { - glog.V(4).Infof("fileId %s is already in cache", fileId) + log.V(-1).Infof("fileId %s is already in cache", fileId) return } @@ -160,7 +160,7 @@ func (c *TieredChunkCache) doSetChunk(fileId string, data []byte) { fid, err := needle.ParseFileIdFromString(fileId) if err != nil { - glog.Errorf("failed to parse file id %s", fileId) + log.Errorf("failed to parse file id %s", fileId) return } diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go index 87f05d399..c67e51f27 100644 --- a/weed/util/chunk_cache/chunk_cache_on_disk.go +++ b/weed/util/chunk_cache/chunk_cache_on_disk.go @@ -7,7 +7,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/backend" "github.com/seaweedfs/seaweedfs/weed/storage/types" @@ -63,7 +63,7 @@ func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCac return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err) } - glog.V(1).Infoln("loading leveldb", v.fileName+".ldb") + log.V(2).Infoln("loading leveldb", v.fileName+".ldb") opts := &opt.Options{ BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB @@ -92,9 +92,9 @@ func (v *ChunkCacheVolume) doReset() { v.Shutdown() os.Truncate(v.fileName+".dat", 0) os.Truncate(v.fileName+".idx", 0) - glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb") + log.V(-1).Infof("cache removeAll %s ...", v.fileName+".ldb") os.RemoveAll(v.fileName + ".ldb") - glog.V(4).Infof("cache removed %s", v.fileName+".ldb") + log.V(-1).Infof("cache removed %s", v.fileName+".ldb") } func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) { diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go index fdbaef7c2..af1d79b42 100644 --- a/weed/util/chunk_cache/on_disk_cache_layer.go +++ b/weed/util/chunk_cache/on_disk_cache_layer.go @@ -2,7 +2,7 @@ package chunk_cache import ( "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "github.com/seaweedfs/seaweedfs/weed/storage" "github.com/seaweedfs/seaweedfs/weed/storage/types" "path" @@ -25,7 
+25,7 @@ func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount in fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i)) diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize) if err != nil { - glog.Errorf("failed to add cache %s : %v", fileName, err) + log.Errorf("failed to add cache %s : %v", fileName, err) } else { c.diskCaches = append(c.diskCaches, diskCache) } @@ -47,7 +47,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit { t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset() if resetErr != nil { - glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) + log.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) return } for i := len(c.diskCaches) - 1; i > 0; i-- { @@ -57,7 +57,7 @@ func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { } if err := c.diskCaches[0].WriteNeedle(needleId, data); err != nil { - glog.V(0).Infof("cache write %v size %d: %v", needleId, len(data), err) + log.V(3).Infof("cache write %v size %d: %v", needleId, len(data), err) } } @@ -72,7 +72,7 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) { continue } if err != nil { - glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) + log.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) continue } if len(data) != 0 { @@ -94,7 +94,7 @@ func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length continue } if err != nil { - glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) + log.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) continue } if len(data) != 0 { @@ -114,7 +114,7 @@ func (c *OnDiskCacheLayer) readChunkAt(buffer []byte, needleId types.NeedleId, o continue } if err != nil { - glog.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) + log.Warningf("failed to read cache file %s id %d: %v", diskCache.fileName, needleId, err) continue } if n > 0 { diff --git a/weed/util/cipher.go b/weed/util/cipher.go index f625f885e..b3f228fb3 100644 --- a/weed/util/cipher.go +++ b/weed/util/cipher.go @@ -7,7 +7,7 @@ import ( "errors" "io" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) type CipherKey []byte @@ -15,7 +15,7 @@ type CipherKey []byte func GenCipherKey() CipherKey { key := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, key); err != nil { - glog.Fatalf("random key gen: %v", err) + log.Fatalf("random key gen: %v", err) } return CipherKey(key) } diff --git a/weed/util/compression.go b/weed/util/compression.go index d62ba9088..9bd3d6fbb 100644 --- a/weed/util/compression.go +++ b/weed/util/compression.go @@ -5,7 +5,7 @@ import ( "fmt" "strings" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" // "github.com/klauspost/compress/zstd" ) @@ -31,7 +31,7 @@ func MaybeDecompressData(input []byte) []byte { uncompressed, err := DecompressData(input) if err != nil { if err != UnsupportedCompression { - glog.Errorf("decompressed data: %v", err) + log.Errorf("decompressed data: %v", err) } return input } diff --git a/weed/util/config.go b/weed/util/config.go index e5b32d512..0f56203fc 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -6,7 +6,7 @@ import ( 
"github.com/spf13/viper" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var ( @@ -50,12 +50,12 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file if strings.Contains(err.Error(), "Not Found") { - glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) + log.V(2).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) } else { - glog.Fatalf("Reading %s: %v", viper.ConfigFileUsed(), err) + log.Fatalf("Reading %s: %v", viper.ConfigFileUsed(), err) } if required { - glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ + log.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) @@ -63,7 +63,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { return false } } - glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) + log.V(2).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) return true } diff --git a/weed/util/file_util.go b/weed/util/file_util.go index 430b6bc86..f3818725d 100644 --- a/weed/util/file_util.go +++ b/weed/util/file_util.go @@ -5,7 +5,7 @@ import ( "crypto/sha256" "errors" "fmt" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "os" "os/user" "path/filepath" @@ -24,7 +24,7 @@ func TestFolderWritable(folder string) (err error) { return errors.New("Not a valid folder!") } perm := fileInfo.Mode().Perm() - glog.V(0).Infoln("Folder", folder, "Permission:", perm) + log.V(3).Infoln("Folder", folder, "Permission:", perm) if 0200&perm != 0 { return nil } @@ -67,7 +67,7 @@ func CheckFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti return } if err != nil { - glog.Errorf("check %s: %v", filename, err) + log.Errorf("check %s: %v", filename, err) return } if fi.Mode()&0400 != 0 { diff --git a/weed/util/grace/pprof.go b/weed/util/grace/pprof.go index 620184c9b..6dd2bb5d0 100644 --- a/weed/util/grace/pprof.go +++ b/weed/util/grace/pprof.go @@ -5,14 +5,14 @@ import ( "runtime" "runtime/pprof" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) func SetupProfiling(cpuProfile, memProfile string) { if cpuProfile != "" { f, err := os.Create(cpuProfile) if err != nil { - glog.Fatal(err) + log.Fatal(err) } runtime.SetBlockProfileRate(1) runtime.SetMutexProfileFraction(1) @@ -44,7 +44,7 @@ func SetupProfiling(cpuProfile, memProfile string) { runtime.MemProfileRate = 1 f, err := os.Create(memProfile) if err != nil { - glog.Fatal(err) + log.Fatal(err) } OnInterrupt(func() { pprof.WriteHeapProfile(f) diff --git a/weed/util/grace/signal_handling.go b/weed/util/grace/signal_handling.go index 0fc0f43e1..4e6ef456f 100644 --- a/weed/util/grace/signal_handling.go +++ b/weed/util/grace/signal_handling.go @@ -4,7 +4,7 @@ package grace import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" "os" "os/signal" "reflect" @@ -45,7 +45,7 @@ func init() { } else { interruptHookLock.RLock() for _, hook := range interruptHooks { - glog.V(4).Infof("exec interrupt hook func name:%s", GetFunctionName(hook)) + log.V(-1).Infof("exec interrupt hook func name:%s", 
GetFunctionName(hook)) hook() } interruptHookLock.RUnlock() diff --git a/weed/util/http/http_global_client_init.go b/weed/util/http/http_global_client_init.go index 0dcb05cfd..d4f4d752c 100644 --- a/weed/util/http/http_global_client_init.go +++ b/weed/util/http/http_global_client_init.go @@ -1,7 +1,7 @@ package http import ( - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" util_http_client "github.com/seaweedfs/seaweedfs/weed/util/http/client" ) @@ -22,6 +22,6 @@ func InitGlobalHttpClient() { globalHttpClient, err = NewGlobalHttpClient() if err != nil { - glog.Fatalf("error init global http client: %v", err) + log.Fatalf("error init global http client: %v", err) } } diff --git a/weed/util/http/http_global_client_util.go b/weed/util/http/http_global_client_util.go index 33d978d9e..e2e6dfbba 100644 --- a/weed/util/http/http_global_client_util.go +++ b/weed/util/http/http_global_client_util.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) var ErrNotFound = fmt.Errorf("not found") @@ -281,7 +281,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isContentCompressed bool, isFullC // drains the response body to avoid memory leak data, _ := io.ReadAll(reader) if len(data) != 0 { - glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) + log.V(2).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) } return n, err } @@ -363,7 +363,7 @@ func readEncryptedUrl(fileUrl, jwt string, cipherKey []byte, isContentCompressed if isContentCompressed { decryptedData, err = util.DecompressData(decryptedData) if err != nil { - glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err) + log.V(3).Infof("unzip decrypt %s: %v", fileUrl, err) } } if len(decryptedData) < int(offset)+size { @@ -423,7 +423,7 @@ func CloseResponse(resp *http.Response) { io.Copy(io.Discard, reader) resp.Body.Close() if reader.BytesRead > 0 { - glog.V(1).Infof("response leftover %d bytes", reader.BytesRead) + log.V(2).Infof("response leftover %d bytes", reader.BytesRead) } } @@ -432,7 +432,7 @@ func CloseRequest(req *http.Request) { io.Copy(io.Discard, reader) req.Body.Close() if reader.BytesRead > 0 { - glog.V(1).Infof("request leftover %d bytes", reader.BytesRead) + log.V(2).Infof("request leftover %d bytes", reader.BytesRead) } } @@ -467,13 +467,13 @@ func RetriedFetchChunkData(buffer []byte, urlStrings []string, cipherKey []byte, break } if err != nil { - glog.V(0).Infof("read %s failed, err: %v", urlString, err) + log.V(3).Infof("read %s failed, err: %v", urlString, err) } else { break } } if err != nil && shouldRetry { - glog.V(0).Infof("retry reading in %v", waitTime) + log.V(3).Infof("retry reading in %v", waitTime) time.Sleep(waitTime) } else { break diff --git a/weed/util/lock_table.go b/weed/util/lock_table.go index a932ae5b1..5c0e11368 100644 --- a/weed/util/lock_table.go +++ b/weed/util/lock_table.go @@ -5,7 +5,7 @@ import ( "sync" "sync/atomic" - "github.com/seaweedfs/seaweedfs/weed/glog" + "github.com/seaweedfs/seaweedfs/weed/util/log" ) // LockTable is a table of locks that can be acquired. 
@@ -70,7 +70,7 @@ func (lt *LockTable[T]) AcquireLock(intention string, key T, lockType LockType) // If the lock is held exclusively, wait entry.mu.Lock() if len(entry.waiters) > 0 || lockType == ExclusiveLock || entry.activeExclusiveLockOwnerCount > 0 { - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s wait for %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { @@ -97,7 +97,7 @@ func (lt *LockTable[T]) AcquireLock(intention string, key T, lockType LockType) } // Otherwise, grant the lock - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s locked %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { @@ -150,7 +150,7 @@ func (lt *LockTable[T]) ReleaseLock(key T, lock *ActiveLock) { delete(lt.locksInFlight, key) } - if glog.V(4) { + if log.V(-1).Info != nil { fmt.Printf("ActiveLock %d %s unlocked %+v type=%v with waiters %d active r%d w%d.\n", lock.ID, lock.intention, key, lock.lockType, len(entry.waiters), entry.activeSharedLockOwnerCount, entry.activeExclusiveLockOwnerCount) if len(entry.waiters) > 0 { for _, waiter := range entry.waiters { diff --git a/weed/util/log/README.md b/weed/util/log/README.md new file mode 100644 index 000000000..b6a603bf3 --- /dev/null +++ b/weed/util/log/README.md @@ -0,0 +1,98 @@ +# SeaweedFS Logging Package + +This package provides a logging interface for SeaweedFS using [zap](https://github.com/uber-go/zap) as the underlying logging library. It provides a similar interface to glog while offering the performance and features of zap. 
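+ +For example, glog verbosity and zap severity run in opposite directions: in zap, lower levels are more verbose, so this patch inverts the level at every converted call site. A minimal sketch of the conversion (the `vid` variable is hypothetical): + +```go +// before: glog.V(4) was the most verbose call +// glog.V(4).Infof("read volume %d", vid) + +// after: glog.V(4) maps to Debug (-1); likewise glog.V(0) maps to log.V(3) +log.V(-1).Infof("read volume %d", vid) +```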
+ +## Features + +- High-performance structured logging +- JSON output format +- Dynamic log level changes +- Support for both structured and unstructured logging +- Compatible with existing glog-style code +- Thread-safe + +## Usage + +### Basic Setup + +```go +import "github.com/seaweedfs/seaweedfs/weed/util/log" +import "go.uber.org/zap/zapcore" + +// Initialize the logger with info level; the second argument is an +// optional *LogConfig for file output and rotation +log.Init(zapcore.InfoLevel, nil) +``` + +### Basic Logging + +```go +// Basic logging +log.Info("This is an info message") +log.Infof("This is a formatted info message: %s", "hello") +log.Warning("This is a warning message") +log.Warningf("This is a formatted warning message: %s", "hello") +log.Error("This is an error message") +log.Errorf("This is a formatted error message: %s", "hello") +``` + +### Verbose Logging + +```go +// Using V for verbose logging; the message is dropped unless the +// current level enables the given verbosity +log.V(1).Info("This is a verbose message") +log.V(1).Infof("This is a formatted verbose message: %s", "hello") +``` + +### Structured Logging + +```go +// Using structured logging (zap.String and zap.Int come from go.uber.org/zap) +logger := log.With( + zap.String("service", "example"), + zap.Int("version", 1), +) +logger.Info("This is a structured log message") + +// Using sugared logger with fields +sugar := log.WithSugar("service", "example", "version", 1) +sugar.Infof("This is a sugared log message with fields: %s", "hello") +``` + +### Fatal Logging + +```go +// Fatal logging (will exit the program) +log.Fatal("This is a fatal message") +log.Fatalf("This is a formatted fatal message: %s", "hello") +``` + +## Log Levels + +Because `Level` is an alias for `zapcore.Level`, the package uses the standard zap levels: + +- Debug (-1) +- Info (0) +- Warning (1) +- Error (2) +- DPanic (3) +- Panic (4) +- Fatal (5) + +## Migration from glog + +To migrate from glog to this package: + +1. Replace `import "github.com/golang/glog"` with `import "github.com/seaweedfs/seaweedfs/weed/util/log"` +2. Replace glog function calls with their log package equivalents: + - `glog.Info` -> `log.Info` + - `glog.Infof` -> `log.Infof` + - `glog.Warning` -> `log.Warning` + - `glog.Warningf` -> `log.Warningf` + - `glog.Error` -> `log.Error` + - `glog.Errorf` -> `log.Errorf` + - `glog.Fatal` -> `log.Fatal` + - `glog.Fatalf` -> `log.Fatalf` +3. Invert the verbosity levels, since zap treats lower levels as more verbose (see the sketch after the introduction above): + - `glog.V(0)` -> `log.V(3)` + - `glog.V(1)` -> `log.V(2)` + - `glog.V(2)` -> `log.V(1)` + - `glog.V(3)` -> `log.V(0)` + - `glog.V(4)` and higher -> `log.V(-1)` + +## Example + +See the `example` directory for a complete example of how to use the logging package. \ No newline at end of file diff --git a/weed/util/log/log.go b/weed/util/log/log.go new file mode 100644 index 000000000..0a6af15c9 --- /dev/null +++ b/weed/util/log/log.go @@ -0,0 +1,239 @@ +package log + +import ( + "os" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +// Level is an alias for zapcore.Level +type Level = zapcore.Level + +// LogConfig holds the configuration for logging +type LogConfig struct { + // LogFile is the path to the log file.
If empty, logs will be written to stdout + LogFile string + // MaxSize is the maximum size in megabytes of the log file before it gets rotated + MaxSize int + // MaxBackups is the maximum number of old log files to retain + MaxBackups int + // MaxAge is the maximum number of days to retain old log files + MaxAge int + // Compress determines if the rotated log files should be compressed + Compress bool +} + +var ( + // Logger is the global logger instance + Logger *zap.Logger + // Sugar is the global sugared logger instance + Sugar *zap.SugaredLogger + // atom is the atomic level for dynamic log level changes + atom zap.AtomicLevel + // once ensures initialization happens only once + once sync.Once + // defaultLevel is the default logging level if not specified + defaultLevel = zapcore.InfoLevel +) + +// VerboseLogger wraps a sugared logger with verbosity level +type VerboseLogger struct { + level Level +} + +// Verbose returns a VerboseLogger for the given verbosity level +func Verbose(level Level) *VerboseLogger { + return &VerboseLogger{level: level} +} + +// Infof logs a formatted message at info level if the verbosity level is enabled +func (v *VerboseLogger) Infof(format string, args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Infof(format, args...) + } +} + +// Info logs a message at info level if the verbosity level is enabled +func (v *VerboseLogger) Info(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Info(args...) + } +} + +// Infoln logs a message at info level with a newline if the verbosity level is enabled +func (v *VerboseLogger) Infoln(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Infoln(args...) + } +} + +// Warning logs a message at warn level if the verbosity level is enabled +func (v *VerboseLogger) Warning(args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Warn(args...) + } +} + +// Warningf logs a formatted message at warn level if the verbosity level is enabled +func (v *VerboseLogger) Warningf(format string, args ...interface{}) { + if atom.Enabled(v.level) { + Sugar.Warnf(format, args...) 
+ } +} + +// Init initializes the logger with the given level and configuration +func Init(level Level, config *LogConfig) { + once.Do(func() { + // Initialize with default level if not specified + if level == 0 { + level = defaultLevel + } + + atom = zap.NewAtomicLevel() + atom.SetLevel(level) + + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + var writeSyncer zapcore.WriteSyncer + if config != nil && config.LogFile != "" { + // Create a lumberjack logger for log rotation + rotator := &lumberjack.Logger{ + Filename: config.LogFile, + MaxSize: config.MaxSize, // megabytes + MaxBackups: config.MaxBackups, + MaxAge: config.MaxAge, // days + Compress: config.Compress, + } + writeSyncer = zapcore.AddSync(rotator) + } else { + writeSyncer = zapcore.AddSync(os.Stdout) + } + + core := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + writeSyncer, + atom, + ) + + Logger = zap.New(core) + Sugar = Logger.Sugar() + }) +} + +// SetLevel changes the logging level dynamically +func SetLevel(level Level) { + if atom == (zap.AtomicLevel{}) { + Init(level, nil) + return + } + atom.SetLevel(level) +} + +// V returns a VerboseLogger for the given verbosity level +func V(level Level) *VerboseLogger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Verbose(level) +} + +// Info logs a message at info level +func Info(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Info(args...) +} + +// Infof logs a formatted message at info level +func Infof(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Infof(format, args...) +} + +// Warning logs a message at warn level +func Warning(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Warn(args...) +} + +// Warningf logs a formatted message at warn level +func Warningf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Warnf(format, args...) +} + +// Error logs a message at error level +func Error(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Error(args...) +} + +// Errorf logs a formatted message at error level +func Errorf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Errorf(format, args...) +} + +// Fatal logs a message at fatal level and then calls os.Exit(1) +func Fatal(args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatal(args...) +} + +// Fatalf logs a formatted message at fatal level and then calls os.Exit(1) +func Fatalf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatalf(format, args...) +} + +// Exitf logs a formatted message at fatal level and then exits; +// Sugar.Fatalf already calls os.Exit(1) +func Exitf(format string, args ...interface{}) { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + Sugar.Fatalf(format, args...) +} + +// With returns a logger with the given fields +func With(fields ...zap.Field) *zap.Logger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Logger.With(fields...) +} + +// WithSugar returns a sugared logger with the given fields +func WithSugar(args ...interface{}) *zap.SugaredLogger { + if atom == (zap.AtomicLevel{}) { + Init(defaultLevel, nil) + } + return Sugar.With(args...)
+}
+
+// Printf logs a formatted message at info level
+func Printf(format string, args ...interface{}) {
+	if atom == (zap.AtomicLevel{}) {
+		Init(defaultLevel, nil)
+	}
+	Sugar.Infof(format, args...)
+}
diff --git a/weed/util/log_buffer/log_buffer.go b/weed/util/log_buffer/log_buffer.go
index fb1f8dc2f..4fb6b2722 100644
--- a/weed/util/log_buffer/log_buffer.go
+++ b/weed/util/log_buffer/log_buffer.go
@@ -8,7 +8,7 @@ import (
 
 	"google.golang.org/protobuf/proto"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -117,7 +117,7 @@ func (logBuffer *LogBuffer) AddDataToBuffer(partitionKey, data []byte, processin
 	}
 
 	if logBuffer.startTime.Add(logBuffer.flushInterval).Before(ts) || len(logBuffer.buf)-logBuffer.pos < size+4 {
-		// glog.V(0).Infof("%s copyToFlush1 batch:%d count:%d start time %v, ts %v, remaining %d bytes", logBuffer.name, logBuffer.batchIndex, len(logBuffer.idx), logBuffer.startTime, ts, len(logBuffer.buf)-logBuffer.pos)
+		// log.V(3).Infof("%s copyToFlush1 batch:%d count:%d start time %v, ts %v, remaining %d bytes", logBuffer.name, logBuffer.batchIndex, len(logBuffer.idx), logBuffer.startTime, ts, len(logBuffer.buf)-logBuffer.pos)
 		toFlush = logBuffer.copyToFlush()
 		logBuffer.startTime = ts
 		if len(logBuffer.buf) < size+4 {
@@ -159,7 +159,7 @@ func (logBuffer *LogBuffer) IsAllFlushed() bool {
 func (logBuffer *LogBuffer) loopFlush() {
 	for d := range logBuffer.flushChan {
 		if d != nil {
-			// glog.V(4).Infof("%s flush [%v, %v] size %d", m.name, d.startTime, d.stopTime, len(d.data.Bytes()))
+			// log.V(-1).Infof("%s flush [%v, %v] size %d", m.name, d.startTime, d.stopTime, len(d.data.Bytes()))
 			logBuffer.flushFn(logBuffer, d.startTime, d.stopTime, d.data.Bytes())
 			d.releaseMemory()
 			// local logbuffer is different from aggregate logbuffer here
@@ -179,10 +179,10 @@ func (logBuffer *LogBuffer) loopInterval() {
 			toFlush := logBuffer.copyToFlush()
 			logBuffer.Unlock()
 			if toFlush != nil {
-				glog.V(4).Infof("%s flush [%v, %v] size %d", logBuffer.name, toFlush.startTime, toFlush.stopTime, len(toFlush.data.Bytes()))
+				log.V(-1).Infof("%s flush [%v, %v] size %d", logBuffer.name, toFlush.startTime, toFlush.stopTime, len(toFlush.data.Bytes()))
 				logBuffer.flushChan <- toFlush
 			} else {
-				// glog.V(0).Infof("%s no flush", m.name)
+				// log.V(3).Infof("%s no flush", m.name)
 			}
 		}
 	}
@@ -198,9 +198,9 @@ func (logBuffer *LogBuffer) copyToFlush() *dataToFlush {
 			stopTime:  logBuffer.stopTime,
 			data:      copiedBytes(logBuffer.buf[:logBuffer.pos]),
 		}
-		// glog.V(4).Infof("%s flushing [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
+		// log.V(-1).Infof("%s flushing [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
 	} else {
-		// glog.V(4).Infof("%s removed from memory [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
+		// log.V(-1).Infof("%s removed from memory [0,%d) with %d entries [%v, %v]", m.name, m.pos, len(m.idx), m.startTime, m.stopTime)
 		logBuffer.lastFlushDataTime = logBuffer.stopTime
 	}
 	logBuffer.buf = logBuffer.prevBuffers.SealBuffer(logBuffer.startTime, logBuffer.stopTime, logBuffer.buf, logBuffer.pos, logBuffer.batchIndex)
@@ -259,7 +259,7 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu
 		return nil, -2, nil
 	} else if lastReadPosition.Before(tsMemory) && lastReadPosition.BatchIndex+1 < tsBatchIndex { // case 2.3
 		if !logBuffer.lastFlushDataTime.IsZero() {
-			glog.V(0).Infof("resume with last flush time: %v", logBuffer.lastFlushDataTime)
+			log.V(3).Infof("resume with last flush time: %v", logBuffer.lastFlushDataTime)
 			return nil, -2, ResumeFromDiskError
 		}
 	}
@@ -270,14 +270,14 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu
 		return nil, logBuffer.batchIndex, nil
 	}
 	if lastReadPosition.After(logBuffer.stopTime) {
-		// glog.Fatalf("unexpected last read time %v, older than latest %v", lastReadPosition, m.stopTime)
+		// log.Fatalf("unexpected last read time %v, older than latest %v", lastReadPosition, m.stopTime)
 		return nil, logBuffer.batchIndex, nil
 	}
 	if lastReadPosition.Before(logBuffer.startTime) {
 		// println("checking ", lastReadPosition.UnixNano())
 		for _, buf := range logBuffer.prevBuffers.buffers {
 			if buf.startTime.After(lastReadPosition.Time) {
-				// glog.V(4).Infof("%s return the %d sealed buffer %v", m.name, i, buf.startTime)
+				// log.V(-1).Infof("%s return the %d sealed buffer %v", m.name, i, buf.startTime)
 				// println("return the", i, "th in memory", buf.startTime.UnixNano())
 				return copiedBytes(buf.buf[:buf.size]), buf.batchIndex, nil
 			}
@@ -287,7 +287,7 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu
 			return copiedBytes(buf.buf[pos:buf.size]), buf.batchIndex, nil
 		}
 	}
-	// glog.V(4).Infof("%s return the current buf %v", m.name, lastReadPosition)
+	// log.V(-1).Infof("%s return the current buf %v", m.name, lastReadPosition)
 	return copiedBytes(logBuffer.buf[:logBuffer.pos]), logBuffer.batchIndex, nil
 }
 
@@ -358,7 +358,7 @@ func readTs(buf []byte, pos int) (size int, ts int64) {
 
 	err := proto.Unmarshal(entryData, logEntry)
 	if err != nil {
-		glog.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
+		log.Fatalf("unexpected unmarshal filer_pb.LogEntry: %v", err)
 	}
 	return size, logEntry.TsNs
 
diff --git a/weed/util/log_buffer/log_read.go b/weed/util/log_buffer/log_read.go
index cf83de1e5..ac7ffec53 100644
--- a/weed/util/log_buffer/log_read.go
+++ b/weed/util/log_buffer/log_read.go
@@ -7,7 +7,7 @@ import (
 
 	"google.golang.org/protobuf/proto"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 )
@@ -57,7 +57,7 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition
 		if bytesBuf != nil {
 			readSize = bytesBuf.Len()
 		}
-		glog.V(4).Infof("%s ReadFromBuffer at %v batch %d. Read bytes %v batch %d", readerName, lastReadPosition, lastReadPosition.BatchIndex, readSize, batchIndex)
+		log.V(-1).Infof("%s ReadFromBuffer at %v batch %d. Read bytes %v batch %d", readerName, lastReadPosition, lastReadPosition.BatchIndex, readSize, batchIndex)
 		if bytesBuf == nil {
 			if batchIndex >= 0 {
 				lastReadPosition = NewMessagePosition(lastReadPosition.UnixNano(), batchIndex)
@@ -93,14 +93,14 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition
 			size := util.BytesToUint32(buf[pos : pos+4])
 			if pos+4+int(size) > len(buf) {
 				err = ResumeError
-				glog.Errorf("LoopProcessLogData: %s read buffer %v read %d entries [%d,%d) from [0,%d)", readerName, lastReadPosition, batchSize, pos, pos+int(size)+4, len(buf))
+				log.Errorf("LoopProcessLogData: %s read buffer %v read %d entries [%d,%d) from [0,%d)", readerName, lastReadPosition, batchSize, pos, pos+int(size)+4, len(buf))
 				return
 			}
 			entryData := buf[pos+4 : pos+4+int(size)]
 
 			logEntry := &filer_pb.LogEntry{}
 			if err = proto.Unmarshal(entryData, logEntry); err != nil {
-				glog.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
+				log.Errorf("unexpected unmarshal filer_pb.LogEntry: %v", err)
 				pos += 4 + int(size)
 				continue
 			}
@@ -112,11 +112,11 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition
 			lastReadPosition = NewMessagePosition(logEntry.TsNs, batchIndex)
 
 			if isDone, err = eachLogDataFn(logEntry); err != nil {
-				glog.Errorf("LoopProcessLogData: %s process log entry %d %v: %v", readerName, batchSize+1, logEntry, err)
+				log.Errorf("LoopProcessLogData: %s process log entry %d %v: %v", readerName, batchSize+1, logEntry, err)
 				return
 			}
 			if isDone {
-				glog.V(0).Infof("LoopProcessLogData2: %s process log entry %d", readerName, batchSize+1)
+				log.V(3).Infof("LoopProcessLogData2: %s process log entry %d", readerName, batchSize+1)
 				return
 			}
 
@@ -126,7 +126,7 @@ func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition
 
 		}
 
-		glog.V(4).Infof("%s sent messages ts[%+v,%+v] size %d\n", readerName, startPosition, lastReadPosition, batchSize)
+		log.V(-1).Infof("%s sent messages ts[%+v,%+v] size %d\n", readerName, startPosition, lastReadPosition, batchSize)
 	}
 }
 
diff --git a/weed/util/minfreespace.go b/weed/util/minfreespace.go
index 0c4461ff1..6743fbd6a 100644
--- a/weed/util/minfreespace.go
+++ b/weed/util/minfreespace.go
@@ -3,7 +3,7 @@ package util
 import (
 	"errors"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"strconv"
 	"strings"
 )
@@ -61,7 +61,7 @@ func MustParseMinFreeSpace(minFreeSpace string, minFreeSpacePercent string) (spa
 		if vv, e := ParseMinFreeSpace(freeString); e == nil {
 			spaces = append(spaces, *vv)
 		} else {
-			glog.Fatalf("The value specified in -minFreeSpace not a valid value %s", freeString)
+			log.Fatalf("The value specified in -minFreeSpace not a valid value %s", freeString)
 		}
 	}
 
diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go
index f235a77b3..1069acc01 100644
--- a/weed/util/net_timeout.go
+++ b/weed/util/net_timeout.go
@@ -1,7 +1,7 @@
 package util
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"net"
 	"time"
 
@@ -113,7 +113,7 @@ func NewIpAndLocalListeners(host string, port int, timeout time.Duration) (ipLis
 	if host != "localhost" && host != "" && host != "0.0.0.0" && host != "127.0.0.1" && host != "[::]" && host != "[::1]" {
 		listener, err = net.Listen("tcp", JoinHostPort("localhost", port))
 		if err != nil {
-			glog.V(0).Infof("skip starting on %s:%d: %v", host, port, err)
+			log.V(3).Infof("skip starting on %s:%d: %v", host, port, err)
 			return ipListener, nil, nil
 		}
 
diff --git a/weed/util/network.go b/weed/util/network.go
index 69559b5f0..1e1132266 100644
--- a/weed/util/network.go
+++ b/weed/util/network.go
@@ -5,13 +5,13 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 func DetectedHostAddress() string {
 	netInterfaces, err := net.Interfaces()
 	if err != nil {
-		glog.V(0).Infof("failed to detect net interfaces: %v", err)
+		log.V(3).Infof("failed to detect net interfaces: %v", err)
 		return ""
 	}
 
@@ -33,7 +33,7 @@ func selectIpV4(netInterfaces []net.Interface, isIpV4 bool) string {
 		}
 		addrs, err := netInterface.Addrs()
 		if err != nil {
-			glog.V(0).Infof("get interface addresses: %v", err)
+			log.V(3).Infof("get interface addresses: %v", err)
 		}
 
 		for _, a := range addrs {
diff --git a/weed/util/retry.go b/weed/util/retry.go
index 006cda466..705729995 100644
--- a/weed/util/retry.go
+++ b/weed/util/retry.go
@@ -4,7 +4,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 var RetryWaitTime = 6 * time.Second
@@ -16,14 +16,14 @@ func Retry(name string, job func() error) (err error) {
 		err = job()
 		if err == nil {
 			if hasErr {
-				glog.V(0).Infof("retry %s successfully", name)
+				log.V(3).Infof("retry %s successfully", name)
 			}
 			waitTime = time.Second
 			break
 		}
 		if strings.Contains(err.Error(), "transport") {
 			hasErr = true
-			glog.V(0).Infof("retry %s: err: %v", name, err)
+			log.V(3).Infof("retry %s: err: %v", name, err)
 		} else {
 			break
 		}
@@ -40,14 +40,14 @@ func MultiRetry(name string, errList []string, job func() error) (err error) {
 		err = job()
 		if err == nil {
 			if hasErr {
-				glog.V(0).Infof("retry %s successfully", name)
+				log.V(3).Infof("retry %s successfully", name)
 			}
 			waitTime = time.Second
 			break
 		}
 		if containErr(err.Error(), errList) {
 			hasErr = true
-			glog.V(0).Infof("retry %s: err: %v", name, err)
+			log.V(3).Infof("retry %s: err: %v", name, err)
 		} else {
 			break
 		}
@@ -68,7 +68,7 @@ func RetryUntil(name string, job func() error, onErrFn func(err error) (shouldCo
 		}
 		if onErrFn(err) {
 			if strings.Contains(err.Error(), "transport") {
-				glog.V(0).Infof("retry %s: err: %v", name, err)
+				log.V(3).Infof("retry %s: err: %v", name, err)
 			}
 			time.Sleep(waitTime)
 			if waitTime < RetryWaitTime {
diff --git a/weed/util/skiplist/name_batch.go b/weed/util/skiplist/name_batch.go
index 1ab2a6b1f..55b831bdf 100644
--- a/weed/util/skiplist/name_batch.go
+++ b/weed/util/skiplist/name_batch.go
@@ -1,7 +1,7 @@
 package skiplist
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"google.golang.org/protobuf/proto"
 	"slices"
 	"strings"
@@ -63,7 +63,7 @@ func LoadNameBatch(data []byte) *NameBatch {
 	if len(data) > 0 {
 		err := proto.Unmarshal(data, t)
 		if err != nil {
-			glog.Errorf("unmarshal into NameBatchData{} : %v", err)
+			log.Errorf("unmarshal into NameBatchData{} : %v", err)
 			return nil
 		}
 	}
diff --git a/weed/util/skiplist/name_list_serde.go b/weed/util/skiplist/name_list_serde.go
index 364c0f87a..4ec0da5e9 100644
--- a/weed/util/skiplist/name_list_serde.go
+++ b/weed/util/skiplist/name_list_serde.go
@@ -1,7 +1,7 @@
 package skiplist
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"google.golang.org/protobuf/proto"
 )
 
@@ -18,7 +18,7 @@ func LoadNameList(data []byte, store ListStore, batchSize int) *NameList {
 
 	message := &SkipListProto{}
 	if err := proto.Unmarshal(data, message); err != nil {
-		glog.Errorf("loading skiplist: %v", err)
+		log.Errorf("loading skiplist: %v", err)
 	}
 	nl.skipList.MaxNewLevel = int(message.MaxNewLevel)
 	nl.skipList.MaxLevel = int(message.MaxLevel)
@@ -65,7 +65,7 @@ func (nl *NameList) ToBytes() []byte {
 	}
 	data, err := proto.Marshal(message)
 	if err != nil {
-		glog.Errorf("marshal skiplist: %v", err)
+		log.Errorf("marshal skiplist: %v", err)
 	}
 	return data
 }
diff --git a/weed/util/throttler.go b/weed/util/throttler.go
index 873161e37..506b80a51 100644
--- a/weed/util/throttler.go
+++ b/weed/util/throttler.go
@@ -25,7 +25,7 @@ func (wt *WriteThrottler) MaybeSlowdown(delta int64) {
 		if overLimitBytes > 0 {
 			overRatio := float64(overLimitBytes) / float64(wt.compactionBytePerSecond)
 			sleepTime := time.Duration(overRatio*1000) * time.Millisecond
-			// glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
+			// log.V(3).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", wt.lastSizeCounter, wt.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
 			time.Sleep(sleepTime)
 		}
 		wt.lastSizeCounter, wt.lastSizeCheckTime = 0, time.Now()
diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go
index 175718cd2..b8c3faf15 100644
--- a/weed/wdclient/exclusive_locks/exclusive_locker.go
+++ b/weed/wdclient/exclusive_locks/exclusive_locker.go
@@ -5,7 +5,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"
 )
@@ -104,7 +104,7 @@ func (l *ExclusiveLocker) RequestLock(clientName string) {
 					}
 					return err
 				}); err != nil {
-					glog.Errorf("failed to renew lock: %v", err)
+					log.Errorf("failed to renew lock: %v", err)
 					l.isLocked.Store(false)
 					return
 				} else {
diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go
index da46a440b..784d496f6 100644
--- a/weed/wdclient/masterclient.go
+++ b/weed/wdclient/masterclient.go
@@ -12,7 +12,7 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	"google.golang.org/grpc"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
 )
@@ -117,7 +117,7 @@ func (mc *MasterClient) WaitUntilConnected(ctx context.Context) {
 	for {
 		select {
 		case <-ctx.Done():
-			glog.V(0).Infof("Connection wait stopped: %v", ctx.Err())
+			log.V(3).Infof("Connection wait stopped: %v", ctx.Err())
 			return
 		default:
 			if mc.getCurrentMaster() != "" {
@@ -130,11 +130,11 @@ func (mc *MasterClient) WaitUntilConnected(ctx context.Context) {
 }
 
 func (mc *MasterClient) KeepConnectedToMaster(ctx context.Context) {
-	glog.V(1).Infof("%s.%s masterClient bootstraps with masters %v", mc.FilerGroup, mc.clientType, mc.masters)
+	log.V(2).Infof("%s.%s masterClient bootstraps with masters %v", mc.FilerGroup, mc.clientType, mc.masters)
 	for {
 		select {
 		case <-ctx.Done():
-			glog.V(0).Infof("Connection to masters stopped: %v", ctx.Err())
+			log.V(3).Infof("Connection to masters stopped: %v", ctx.Err())
 			return
 		default:
 			mc.tryAllMasters(ctx)
@@ -158,14 +158,14 @@ func (mc *MasterClient) FindLeaderFromOtherPeers(myMasterAddress pb.ServerAddres
 			leader = resp.Leader
 			return nil
 		}); grpcErr != nil {
-			glog.V(0).Infof("connect to %s: %v", master, grpcErr)
+			log.V(3).Infof("connect to %s: %v", master, grpcErr)
 		}
 
 		if leader != "" {
-			glog.V(0).Infof("existing leader is %s", leader)
+			log.V(3).Infof("existing leader is %s", leader)
 			return
 		}
 	}
-	glog.V(0).Infof("No existing leader found!")
+	log.V(3).Infof("No existing leader found!")
 	return
 }
@@ -177,7 +177,7 @@ func (mc *MasterClient) tryAllMasters(ctx context.Context) {
 	for nextHintedLeader != "" {
 		select {
 		case <-ctx.Done():
-			glog.V(0).Infof("Connection attempt to all masters stopped: %v", ctx.Err())
+			log.V(3).Infof("Connection attempt to all masters stopped: %v", ctx.Err())
 			return
 		default:
 			nextHintedLeader = mc.tryConnectToMaster(ctx, nextHintedLeader)
@@ -188,7 +188,7 @@ func (mc *MasterClient) tryAllMasters(ctx context.Context) {
 }
 
 func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.ServerAddress) (nextHintedLeader pb.ServerAddress) {
-	glog.V(1).Infof("%s.%s masterClient Connecting to master %v", mc.FilerGroup, mc.clientType, master)
+	log.V(2).Infof("%s.%s masterClient Connecting to master %v", mc.FilerGroup, mc.clientType, master)
 	stats.MasterClientConnectCounter.WithLabelValues("total").Inc()
 	gprcErr := pb.WithMasterClient(true, master, mc.grpcDialOption, false, func(client master_pb.SeaweedClient) error {
 		ctx, cancel := context.WithCancel(ctx)
@@ -196,7 +196,7 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 
 		stream, err := client.KeepConnected(ctx)
 		if err != nil {
-			glog.V(1).Infof("%s.%s masterClient failed to keep connected to %s: %v", mc.FilerGroup, mc.clientType, master, err)
+			log.V(2).Infof("%s.%s masterClient failed to keep connected to %s: %v", mc.FilerGroup, mc.clientType, master, err)
 			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToKeepConnected).Inc()
 			return err
 		}
@@ -209,15 +209,15 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 			ClientAddress: string(mc.clientHost),
 			Version:       util.Version(),
 		}); err != nil {
-			glog.V(0).Infof("%s.%s masterClient failed to send to %s: %v", mc.FilerGroup, mc.clientType, master, err)
+			log.V(3).Infof("%s.%s masterClient failed to send to %s: %v", mc.FilerGroup, mc.clientType, master, err)
 			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToSend).Inc()
 			return err
 		}
 
-		glog.V(1).Infof("%s.%s masterClient Connected to %v", mc.FilerGroup, mc.clientType, master)
+		log.V(2).Infof("%s.%s masterClient Connected to %v", mc.FilerGroup, mc.clientType, master)
 		resp, err := stream.Recv()
 		if err != nil {
-			glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
+			log.V(3).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
 			stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
 			return err
 		}
@@ -225,7 +225,7 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 		// check if it is the leader to determine whether to reset the vidMap
 		if resp.VolumeLocation != nil {
 			if resp.VolumeLocation.Leader != "" && string(master) != resp.VolumeLocation.Leader {
-				glog.V(0).Infof("master %v redirected to leader %v", master, resp.VolumeLocation.Leader)
+				log.V(3).Infof("master %v redirected to leader %v", master, resp.VolumeLocation.Leader)
 				nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
 				stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
 				return nil
@@ -240,7 +240,7 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 		for {
 			resp, err := stream.Recv()
 			if err != nil {
-				glog.V(0).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
+				log.V(3).Infof("%s.%s masterClient failed to receive from %s: %v", mc.FilerGroup, mc.clientType, master, err)
 				stats.MasterClientConnectCounter.WithLabelValues(stats.FailedToReceive).Inc()
 				return err
 			}
@@ -248,7 +248,7 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 			if resp.VolumeLocation != nil {
 				// maybe the leader is changed
 				if resp.VolumeLocation.Leader != "" && string(mc.GetMaster(ctx)) != resp.VolumeLocation.Leader {
-					glog.V(0).Infof("currentMaster %v redirected to leader %v", mc.GetMaster(ctx), resp.VolumeLocation.Leader)
+					log.V(3).Infof("currentMaster %v redirected to leader %v", mc.GetMaster(ctx), resp.VolumeLocation.Leader)
 					nextHintedLeader = pb.ServerAddress(resp.VolumeLocation.Leader)
 					stats.MasterClientConnectCounter.WithLabelValues(stats.RedirectedToLeader).Inc()
 					return nil
@@ -262,9 +262,9 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 			if mc.OnPeerUpdate != nil {
 				if update.FilerGroup == mc.FilerGroup {
 					if update.IsAdd {
-						glog.V(0).Infof("+ %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
+						log.V(3).Infof("+ %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
 					} else {
-						glog.V(0).Infof("- %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
+						log.V(3).Infof("- %s@%s noticed %s.%s %s\n", mc.clientType, mc.clientHost, update.FilerGroup, update.NodeType, update.Address)
 					}
 					stats.MasterClientConnectCounter.WithLabelValues(stats.OnPeerUpdate).Inc()
 					mc.OnPeerUpdate(update, time.Now())
@@ -273,21 +273,21 @@ func (mc *MasterClient) tryConnectToMaster(ctx context.Context, master pb.Server
 				mc.OnPeerUpdateLock.RUnlock()
 			}
 			if err := ctx.Err(); err != nil {
-				glog.V(0).Infof("Connection attempt to master stopped: %v", err)
+				log.V(3).Infof("Connection attempt to master stopped: %v", err)
 				return err
 			}
 		}
 	})
 	if gprcErr != nil {
 		stats.MasterClientConnectCounter.WithLabelValues(stats.Failed).Inc()
-		glog.V(1).Infof("%s.%s masterClient failed to connect with master %v: %v", mc.FilerGroup, mc.clientType, master, gprcErr)
+		log.V(2).Infof("%s.%s masterClient failed to connect with master %v: %v", mc.FilerGroup, mc.clientType, master, gprcErr)
 	}
 	return
 }
 
 func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
 	if resp.VolumeLocation.IsEmptyUrl() {
-		glog.V(0).Infof("updateVidMap ignore short heartbeat: %+v", resp)
+		log.V(3).Infof("updateVidMap ignore short heartbeat: %+v", resp)
 		return
 	}
 	// process new volume location
@@ -298,22 +298,22 @@ func (mc *MasterClient) updateVidMap(resp *master_pb.KeepConnectedResponse) {
 		GrpcPort:   int(resp.VolumeLocation.GrpcPort),
 	}
 	for _, newVid := range resp.VolumeLocation.NewVids {
-		glog.V(2).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
+		log.V(1).Infof("%s.%s: %s masterClient adds volume %d", mc.FilerGroup, mc.clientType, loc.Url, newVid)
 		mc.addLocation(newVid, loc)
 	}
 	for _, deletedVid := range resp.VolumeLocation.DeletedVids {
-		glog.V(2).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
+		log.V(1).Infof("%s.%s: %s masterClient removes volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedVid)
 		mc.deleteLocation(deletedVid, loc)
 	}
 	for _, newEcVid := range resp.VolumeLocation.NewEcVids {
-		glog.V(2).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
+		log.V(1).Infof("%s.%s: %s masterClient adds ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, newEcVid)
 		mc.addEcLocation(newEcVid, loc)
 	}
 	for _, deletedEcVid := range resp.VolumeLocation.DeletedEcVids {
-		glog.V(2).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
+		log.V(1).Infof("%s.%s: %s masterClient removes ec volume %d", mc.FilerGroup, mc.clientType, loc.Url, deletedEcVid)
 		mc.deleteEcLocation(deletedEcVid, loc)
 	}
-	glog.V(1).Infof("updateVidMap(%s) %s.%s: %s volume add: %d, del: %d, add ec: %d del ec: %d",
+	log.V(2).Infof("updateVidMap(%s) %s.%s: %s volume add: %d, del: %d, add ec: %d del ec: %d",
 		resp.VolumeLocation.DataCenter, mc.FilerGroup, mc.clientType, loc.Url,
 		len(resp.VolumeLocation.NewVids), len(resp.VolumeLocation.DeletedVids),
 		len(resp.VolumeLocation.NewEcVids), len(resp.VolumeLocation.DeletedEcVids))
diff --git a/weed/wdclient/vid_map.go b/weed/wdclient/vid_map.go
index 7a2a5bb92..966c199eb 100644
--- a/weed/wdclient/vid_map.go
+++ b/weed/wdclient/vid_map.go
@@ -10,7 +10,7 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 const (
@@ -72,7 +72,7 @@ func (vc *vidMap) isSameDataCenter(loc *Location) bool {
 
 func (vc *vidMap) LookupVolumeServerUrl(vid string) (serverUrls []string, err error) {
 	id, err := strconv.Atoi(vid)
 	if err != nil {
-		glog.V(1).Infof("Unknown volume id %s", vid)
+		log.V(2).Infof("Unknown volume id %s", vid)
 		return nil, err
 	}
@@ -117,7 +117,7 @@ func (vc *vidMap) LookupFileId(fileId string) (fullUrls []string, err error) {
 func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error) {
 	id, err := strconv.Atoi(vid)
 	if err != nil {
-		glog.V(1).Infof("Unknown volume id %s", vid)
+		log.V(2).Infof("Unknown volume id %s", vid)
 		return nil, fmt.Errorf("Unknown volume id %s", vid)
 	}
 	foundLocations, found := vc.GetLocations(uint32(id))
@@ -128,7 +128,7 @@ func (vc *vidMap) GetVidLocations(vid string) (locations []Location, err error)
 }
 
 func (vc *vidMap) GetLocations(vid uint32) (locations []Location, found bool) {
-	// glog.V(4).Infof("~ lookup volume id %d: %+v ec:%+v", vid, vc.vid2Locations, vc.ecVid2Locations)
+	// log.V(-1).Infof("~ lookup volume id %d: %+v ec:%+v", vid, vc.vid2Locations, vc.ecVid2Locations)
 	locations, found = vc.getLocations(vid)
 	if found && len(locations) > 0 {
 		return locations, found
@@ -170,7 +170,7 @@ func (vc *vidMap) addLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()
 
-	glog.V(4).Infof("+ volume id %d: %+v", vid, location)
+	log.V(-1).Infof("+ volume id %d: %+v", vid, location)
 
 	locations, found := vc.vid2Locations[vid]
 	if !found {
@@ -192,7 +192,7 @@ func (vc *vidMap) addEcLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()
 
-	glog.V(4).Infof("+ ec volume id %d: %+v", vid, location)
+	log.V(-1).Infof("+ ec volume id %d: %+v", vid, location)
 
 	locations, found := vc.ecVid2Locations[vid]
 	if !found {
@@ -218,7 +218,7 @@ func (vc *vidMap) deleteLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()
 
-	glog.V(4).Infof("- volume id %d: %+v", vid, location)
+	log.V(-1).Infof("- volume id %d: %+v", vid, location)
 
 	locations, found := vc.vid2Locations[vid]
 	if !found {
@@ -241,7 +241,7 @@ func (vc *vidMap) deleteEcLocation(vid uint32, location Location) {
 	vc.Lock()
 	defer vc.Unlock()
 
-	glog.V(4).Infof("- ec volume id %d: %+v", vid, location)
+	log.V(-1).Infof("- ec volume id %d: %+v", vid, location)
 
 	locations, found := vc.ecVid2Locations[vid]
 	if !found {
diff --git a/weed/weed.go b/weed/weed.go
index cde071179..8ca2fb83b 100644
--- a/weed/weed.go
+++ b/weed/weed.go
@@ -16,11 +16,12 @@ import (
 	weed_server "github.com/seaweedfs/seaweedfs/weed/server"
 	"github.com/seaweedfs/seaweedfs/weed/util"
 	flag "github.com/seaweedfs/seaweedfs/weed/util/fla9"
+	"go.uber.org/zap/zapcore"
 
 	"github.com/getsentry/sentry-go"
 	"github.com/seaweedfs/seaweedfs/weed/command"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
 	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
+	"github.com/seaweedfs/seaweedfs/weed/util/log"
 )
 
 var IsDebug *bool
@@ -48,8 +49,14 @@ func init() {
 }
 
 func main() {
-	glog.MaxSize = 1024 * 1024 * 10
-	glog.MaxFileCount = 5
+	config := &log.LogConfig{
+		LogFile:    "weed.log",
+		MaxSize:    100,
+		MaxBackups: 5,
+		MaxAge:     30,
+		Compress:   true,
+	}
+	log.Init(zapcore.InfoLevel, config)
 	flag.Usage = usage
 
 	err := sentry.Init(sentry.ClientOptions{
@@ -207,5 +214,5 @@ func exit() {
 }
 
 func debug(params ...interface{}) {
-	glog.V(4).Infoln(params...)
+	log.V(-1).Infoln(params...)
 }
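Note on the new weed/util/log package: only its tail (the Printf helper) and the Init call in weed/weed.go are visible in this changeset, so the sketch below is a reconstruction of how such a glog-style facade could sit on top of zap, not the PR's actual implementation. The ensureInit helper, the verbose type, and the rotation-free Init body are assumptions; the exported names (Init, V, Errorf, Fatalf, Sugar, LogConfig) and the inverted level scale follow the call sites and the migration scripts, where zapcore.DebugLevel (-1) is the most verbose, glog.V(4) becomes V(-1), and glog.V(0) becomes V(3).

// Sketch of weed/util/log (assumed implementation, for illustration only).
package log

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var (
	atom         zap.AtomicLevel    // runtime-adjustable level gate
	Sugar        *zap.SugaredLogger // shared sugared logger
	defaultLevel = zapcore.InfoLevel
)

// LogConfig mirrors the fields set in weed/weed.go; the rotation fields
// (MaxSize in MB, MaxBackups, MaxAge in days, Compress) suggest a
// lumberjack-style sink, which this sketch deliberately leaves out.
type LogConfig struct {
	LogFile    string
	MaxSize    int
	MaxBackups int
	MaxAge     int
	Compress   bool
}

// Init builds the shared logger at the given level; a nil config logs
// to stderr, otherwise output goes to the configured file.
func Init(level zapcore.Level, config *LogConfig) {
	atom = zap.NewAtomicLevelAt(level)
	cfg := zap.NewProductionConfig()
	cfg.Level = atom
	if config != nil && config.LogFile != "" {
		cfg.OutputPaths = []string{config.LogFile}
	}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	Sugar = logger.Sugar()
}

// ensureInit mirrors the lazy fallback seen in Printf above.
func ensureInit() {
	if atom == (zap.AtomicLevel{}) {
		Init(defaultLevel, nil)
	}
}

// verbose gates a glog-style V(...) call chain on the configured level.
type verbose struct{ enabled bool }

// V mimics glog.V on zap's inverted scale: zapcore.DebugLevel (-1) is
// the most verbose, so former glog.V(4) call sites use V(-1) and former
// glog.V(0) call sites use V(3).
func V(level zapcore.Level) verbose {
	ensureInit()
	return verbose{enabled: atom.Enabled(level)}
}

func (v verbose) Infof(format string, args ...interface{}) {
	if v.enabled {
		Sugar.Infof(format, args...)
	}
}

func (v verbose) Infoln(args ...interface{}) {
	if v.enabled {
		Sugar.Infoln(args...)
	}
}

func Errorf(format string, args ...interface{}) { ensureInit(); Sugar.Errorf(format, args...) }
func Fatalf(format string, args ...interface{}) { ensureInit(); Sugar.Fatalf(format, args...) }

Under this reading, Init at zapcore.InfoLevel emits everything logged at V(0) through V(3) while V(-1) stays silent until the level is lowered at runtime, for example via atom.SetLevel(zapcore.DebugLevel).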