switch to logrus

This switch loses the source filename and line number in log output, which are critical for debugging.
Branch: logrus
Chris Lu, 4 years ago
parent commit 6c9156b25f
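
Most of the diff below is a mechanical rename: glog call sites become calls to a new weed/util/log package, with glog's numeric verbosity folded into logrus-style levels (glog.V(0).Infof → log.Infof, glog.V(1).Infof and glog.V(2).Infof → log.Debugf, glog.V(3).Infof → log.Tracef; Errorf and Fatalf carry over unchanged). The wrapper package itself does not appear in the hunks shown here, so the following is only a minimal sketch, assuming weed/util/log simply re-exports a shared logrus logger:

package log // hypothetical sketch of weed/util/log; not part of this commit's visible hunks

import "github.com/sirupsen/logrus"

// std is the shared logger behind the package-level helpers.
var std = logrus.New()

func Infof(format string, args ...interface{})  { std.Infof(format, args...) }
func Infoln(args ...interface{})                { std.Infoln(args...) }
func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) }
func Tracef(format string, args ...interface{}) { std.Tracef(format, args...) }
func Errorf(format string, args ...interface{}) { std.Errorf(format, args...) }
func Fatal(args ...interface{})                 { std.Fatal(args...) }
func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) }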
100 changed files (changed line counts in parentheses):

  1. go.mod (2)
  2. go.sum (2)
  3. unmaintained/change_superblock/change_superblock.go (12)
  4. unmaintained/diff_volume_servers/diff_volume_servers.go (8)
  5. unmaintained/fix_dat/fix_dat.go (10)
  6. unmaintained/remove_duplicate_fids/remove_duplicate_fids.go (10)
  7. unmaintained/see_dat/see_dat.go (6)
  8. unmaintained/see_idx/see_idx.go (4)
  9. weed/command/benchmark.go (8)
  10. weed/command/compact.go (8)
  11. weed/command/export.go (16)
  12. weed/command/filer.go (18)
  13. weed/command/filer_replication.go (26)
  14. weed/command/filer_sync.go (10)
  15. weed/command/fix.go (12)
  16. weed/command/master.go (20)
  17. weed/command/mount_std.go (14)
  18. weed/command/msg_broker.go (10)
  19. weed/command/s3.go (22)
  20. weed/command/server.go (8)
  21. weed/command/volume.go (44)
  22. weed/command/volume_test.go (4)
  23. weed/command/webdav.go (22)
  24. weed/filer/abstract_sql/abstract_sql_store.go (8)
  25. weed/filer/abstract_sql/abstract_sql_store_kv.go (4)
  26. weed/filer/cassandra/cassandra_store.go (8)
  27. weed/filer/configuration.go (8)
  28. weed/filer/elastic/v7/elastic_store.go (20)
  29. weed/filer/elastic/v7/elastic_store_kv.go (8)
  30. weed/filer/etcd/etcd_store.go (6)
  31. weed/filer/filechunk_manifest.go (8)
  32. weed/filer/filechunks.go (14)
  33. weed/filer/filechunks2_test.go (4)
  34. weed/filer/filer.go (40)
  35. weed/filer/filer_buckets.go (6)
  36. weed/filer/filer_conf.go (10)
  37. weed/filer/filer_delete_entry.go (14)
  38. weed/filer/filer_deletion.go (8)
  39. weed/filer/filer_notify.go (8)
  40. weed/filer/filer_on_meta_event.go (8)
  41. weed/filer/filerstore_hardlink.go (6)
  42. weed/filer/leveldb/leveldb_store.go (8)
  43. weed/filer/leveldb2/leveldb2_store.go (8)
  44. weed/filer/meta_aggregator.go (24)
  45. weed/filer/meta_replay.go (6)
  46. weed/filer/mongodb/mongodb_store.go (8)
  47. weed/filer/mongodb/mongodb_store_kv.go (4)
  48. weed/filer/reader_at.go (24)
  49. weed/filer/redis/universal_redis_store.go (4)
  50. weed/filer/redis2/universal_redis_store.go (4)
  51. weed/filer/stream.go (16)
  52. weed/filesys/dir.go (70)
  53. weed/filesys/dir_link.go (16)
  54. weed/filesys/dir_rename.go (14)
  55. weed/filesys/dirty_page.go (8)
  56. weed/filesys/dirty_page_interval.go (8)
  57. weed/filesys/file.go (40)
  58. weed/filesys/filehandle.go (34)
  59. weed/filesys/meta_cache/meta_cache.go (8)
  60. weed/filesys/meta_cache/meta_cache_init.go (6)
  61. weed/filesys/meta_cache/meta_cache_subscribe.go (10)
  62. weed/filesys/wfs.go (16)
  63. weed/filesys/wfs_deletion.go (6)
  64. weed/filesys/wfs_write.go (8)
  65. weed/filesys/xattr.go (2)
  66. weed/glog/README (2)
  67. weed/glog/glog.go (2)
  68. weed/images/resizing.go (4)
  69. weed/messaging/broker/broker_append.go (8)
  70. weed/messaging/broker/broker_grpc_server_discovery.go (10)
  71. weed/messaging/broker/broker_grpc_server_publish.go (10)
  72. weed/messaging/broker/broker_grpc_server_subscribe.go (10)
  73. weed/messaging/broker/broker_server.go (12)
  74. weed/messaging/broker/topic_manager.go (4)
  75. weed/notification/aws_sqs/aws_sqs_pub.go (6)
  76. weed/notification/configuration.go (8)
  77. weed/notification/gocdk_pub_sub/gocdk_pub_sub.go (6)
  78. weed/notification/google_pub_sub/google_pub_sub.go (14)
  79. weed/notification/kafka/kafka_queue.go (10)
  80. weed/notification/log/log_queue.go (4)
  81. weed/operation/chunked_file.go (8)
  82. weed/operation/grpc_client.go (4)
  83. weed/operation/lookup_vid_cache.go (6)
  84. weed/operation/submit.go (10)
  85. weed/operation/upload_content.go (16)
  86. weed/pb/filer_pb/filer_client.go (22)
  87. weed/pb/filer_pb/filer_pb_helper.go (10)
  88. weed/pb/volume_info.go (14)
  89. weed/replication/repl_util/replication_utli.go (6)
  90. weed/replication/replicator.go (16)
  91. weed/replication/sink/azuresink/azure_sink.go (4)
  92. weed/replication/sink/filersink/fetch_write.go (10)
  93. weed/replication/sink/filersink/filer_sink.go (28)
  94. weed/replication/sink/gcssink/gcs_sink.go (6)
  95. weed/replication/sink/s3sink/s3_sink.go (10)
  96. weed/replication/sink/s3sink/s3_write.go (34)
  97. weed/replication/source/filer_source.go (10)
  98. weed/replication/sub/notification_aws_sqs.go (8)
  99. weed/replication/sub/notification_gocdk_pub_sub.go (4)
  100. weed/replication/sub/notification_google_pub_sub.go (18)

go.mod (2)

@@ -9,6 +9,7 @@ require (
 	github.com/OneOfOne/xxhash v1.2.2
 	github.com/Shopify/sarama v1.23.1
 	github.com/aws/aws-sdk-go v1.33.5
+	github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/cespare/xxhash v1.1.0
 	github.com/chrislusf/raft v1.0.3
@@ -60,6 +61,7 @@ require (
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
 	github.com/seaweedfs/fuse v1.0.7
 	github.com/seaweedfs/goexif v1.0.2
+	github.com/sirupsen/logrus v1.4.2
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect

go.sum (2)

@@ -62,6 +62,8 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
 github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e h1:ZOnKnYG1LLgq4W7wZUYj9ntn3RxQ65EZyYqdtFpP2Dw=
+github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e/go.mod h1:hEvEpPmuwKO+0TbrDQKIkmX0gW2s2waZHF8pIhEEmpM=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
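
The two dependencies added above are logrus itself and banzaicloud's logrus-runtime-formatter, which decorates each log entry with the caller's function, file, and line, addressing the loss flagged in the commit message. A sketch of the wiring, based on the formatter's documented options rather than on any code in these hunks:

package main // illustrative only; this setup code is not part of the diff

import (
	runtime "github.com/banzaicloud/logrus-runtime-formatter"
	"github.com/sirupsen/logrus"
)

func main() {
	// Wrap a regular formatter so each entry regains caller information.
	formatter := runtime.Formatter{
		ChildFormatter: &logrus.TextFormatter{},
		File:           true, // attach the caller's file name
		Line:           true, // attach the caller's line number
	}
	logrus.SetFormatter(&formatter)
	logrus.Infof("this entry now carries function, file, and line")
}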

unmaintained/change_superblock/change_superblock.go (12)

@@ -7,7 +7,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -46,7 +46,7 @@ func main() {
 	}
 	datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
 	if err != nil {
-		glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
+		log.Fatalf("Open Volume Data File [ERROR]: %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()
@@ -54,7 +54,7 @@ func main() {
 	superBlock, err := super_block.ReadSuperBlock(datBackend)
 	if err != nil {
-		glog.Fatalf("cannot parse existing super block: %v", err)
+		log.Fatalf("cannot parse existing super block: %v", err)
 	}

 	fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
@@ -66,7 +66,7 @@ func main() {
 	replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)
 	if err != nil {
-		glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+		log.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
 	}

 	fmt.Printf("Changing replication to: %s\n", replica)
@@ -79,7 +79,7 @@ func main() {
 	ttl, err := needle.ReadTTL(*targetTTL)
 	if err != nil {
-		glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
+		log.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
 	}

 	fmt.Printf("Changing ttl to: %s\n", ttl)
@@ -93,7 +93,7 @@ func main() {
 	header := superBlock.Bytes()
 	if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
-		glog.Fatalf("cannot write super block: %v", e)
+		log.Fatalf("cannot write super block: %v", e)
 	}

 	fmt.Println("Change Applied.")

unmaintained/diff_volume_servers/diff_volume_servers.go (8)

@@ -11,7 +11,7 @@ import (
 	"os"
 	"strings"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -47,7 +47,7 @@ func main() {
 	vid := uint32(*volumeId)
 	servers := strings.Split(*serversStr, ",")
 	if len(servers) < 2 {
-		glog.Fatalf("You must specify more than 1 server\n")
+		log.Fatalf("You must specify more than 1 server\n")
 	}
 	var referenceServer string
 	var maxOffset int64
@@ -55,7 +55,7 @@ func main() {
 	for _, addr := range servers {
 		files, offset, err := getVolumeFiles(vid, addr)
 		if err != nil {
-			glog.Fatalf("Failed to copy idx from volume server %s\n", err)
+			log.Fatalf("Failed to copy idx from volume server %s\n", err)
 		}
 		allFiles[addr] = files
 		if offset > maxOffset {
@@ -101,7 +101,7 @@ func main() {
 				id, err = getNeedleFileId(vid, nid, addr)
 			}
 			if err != nil {
-				glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
+				log.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
 			}
 			fmt.Println(id, addr, diffMsg)
 		}

unmaintained/fix_dat/fix_dat.go (10)

@@ -8,7 +8,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -42,26 +42,26 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Index %v", err)
+		log.Fatalf("Read Volume Index %v", err)
 	}
 	defer indexFile.Close()

 	datFileName := path.Join(*fixVolumePath, fileName+".dat")
 	datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Read Volume Data %v", err)
+		log.Fatalf("Read Volume Data %v", err)
 	}
 	datBackend := backend.NewDiskFile(datFile)
 	defer datBackend.Close()

 	newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
 	if err != nil {
-		glog.Fatalf("Write New Volume Data %v", err)
+		log.Fatalf("Write New Volume Data %v", err)
 	}
 	defer newDatFile.Close()

 	superBlock, err := super_block.ReadSuperBlock(datBackend)
 	if err != nil {
-		glog.Fatalf("Read Volume Data superblock %v", err)
+		log.Fatalf("Read Volume Data superblock %v", err)
 	}
 	newDatFile.Write(superBlock.Bytes())

unmaintained/remove_duplicate_fids/remove_duplicate_fids.go (10)

@@ -6,7 +6,7 @@ import (
 	"os"
 	"path/filepath"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
@@ -49,7 +49,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 		newFileName := filepath.Join(*volumePath, "dat_fixed")
 		newDatFile, err := os.Create(newFileName)
 		if err != nil {
-			glog.Fatalf("Write New Volume Data %v", err)
+			log.Fatalf("Write New Volume Data %v", err)
 		}
 		scanner.datBackend = backend.NewDiskFile(newDatFile)
 		scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
@@ -58,7 +58,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
 	checksum := Checksum(n)
 	if scanner.hashes[checksum] {
-		glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+		log.Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
 		return nil
 	}
 	scanner.hashes[checksum] = true
@@ -83,13 +83,13 @@ func main() {
 	if _, err := os.Stat(scanner.dir); err != nil {
 		if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
-			glog.Fatalf("could not create output dir : %s", err)
+			log.Fatalf("could not create output dir : %s", err)
 		}
 	}

 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
 }

unmaintained/see_dat/see_dat.go (6)

@@ -5,7 +5,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"time"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@@ -32,7 +32,7 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
 func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
 	t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
-	glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
+	log.Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
 		*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
 	return nil
 }
@@ -45,6 +45,6 @@ func main() {
 	scanner := &VolumeFileScanner4SeeDat{}
 	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
 	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+		log.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
 }

unmaintained/see_idx/see_idx.go (4)

@@ -8,7 +8,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage/idx"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 )
@@ -32,7 +32,7 @@ func main() {
 	}
 	indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
 	if err != nil {
-		glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
+		log.Fatalf("Create Volume Index [ERROR] %s\n", err)
 	}
 	defer indexFile.Close()

weed/command/benchmark.go (8)

@@ -16,7 +16,7 @@ import (
 	"google.golang.org/grpc"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -119,7 +119,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 	if *b.cpuprofile != "" {
 		f, err := os.Create(*b.cpuprofile)
 		if err != nil {
-			glog.Fatal(err)
+			log.Fatal(err)
 		}
 		pprof.StartCPUProfile(f)
 		defer pprof.StopCPUProfile()
@@ -310,7 +310,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
 func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
 	file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
-		glog.Fatalf("File to create file %s: %s\n", fileName, err)
+		log.Fatalf("File to create file %s: %s\n", fileName, err)
 	}
 	defer file.Close()
@@ -329,7 +329,7 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
 func readFileIds(fileName string, fileIdLineChan chan string) {
 	file, err := os.Open(fileName) // For read access.
 	if err != nil {
-		glog.Fatalf("File to read file %s: %s\n", fileName, err)
+		log.Fatalf("File to read file %s: %s\n", fileName, err)
 	}
 	defer file.Close()

weed/command/compact.go (8)

@@ -1,7 +1,7 @@
 package command

 import (
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -44,15 +44,15 @@ func runCompact(cmd *Command, args []string) bool {
 	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid,
 		storage.NeedleMapInMemory, nil, nil, preallocate, 0)
 	if err != nil {
-		glog.Fatalf("Load Volume [ERROR] %s\n", err)
+		log.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
 	if *compactMethod == 0 {
 		if err = v.Compact(preallocate, 0); err != nil {
-			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+			log.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	} else {
 		if err = v.Compact2(preallocate, 0); err != nil {
-			glog.Fatalf("Compact Volume [ERROR] %s\n", err)
+			log.Fatalf("Compact Volume [ERROR] %s\n", err)
 		}
 	}

weed/command/export.go (16)

@@ -13,7 +13,7 @@ import (
 	"text/template"
 	"time"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@@ -111,11 +111,11 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 	vid := scanner.vid

 	nv, ok := needleMap.Get(n.Id)
-	glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
+	log.Tracef("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
 		n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
 	if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
 		if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
-			glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
+			log.Tracef("Skipping this file, as it's old enough: LastModified %d vs %d",
 				n.LastModified, newerThanUnix)
 			return nil
 		}
@@ -139,9 +139,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
 				printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
 			}
 		}
-		glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
+		log.Debugf("This seems deleted %d size %d", n.Id, n.Size)
 	} else {
-		glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
+		log.Debugf("Skipping later-updated Id %d size %d", n.Id, n.Size)
 	}
 	return nil
 }
@@ -178,7 +178,7 @@ func runExport(cmd *Command, args []string) bool {
 		outputFile = os.Stdout
 	} else {
 		if outputFile, err = os.Create(*output); err != nil {
-			glog.Fatalf("cannot open output tar %s: %s", *output, err)
+			log.Fatalf("cannot open output tar %s: %s", *output, err)
 		}
 	}
 	defer outputFile.Close()
@@ -201,7 +201,7 @@ func runExport(cmd *Command, args []string) bool {
 	defer needleMap.Close()

 	if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
-		glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
+		log.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
 	}

 	volumeFileScanner := &VolumeFileScanner4Export{
@@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {
 	err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
 	if err != nil && err != io.EOF {
-		glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+		log.Fatalf("Export Volume File [ERROR] %s\n", err)
 	}

 	return true
 }

weed/command/filer.go (18)

@@ -9,7 +9,7 @@ import (
 	"google.golang.org/grpc/reflection"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -152,37 +152,37 @@ func (fo *FilerOptions) startFiler() {
 		Filers: peers,
 	})
 	if nfs_err != nil {
-		glog.Fatalf("Filer startup error: %v", nfs_err)
+		log.Fatalf("Filer startup error: %v", nfs_err)
 	}

 	if *fo.publicPort != 0 {
 		publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
-		glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
+		log.Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
 		publicListener, e := util.NewListener(publicListeningAddress, 0)
 		if e != nil {
-			glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
+			log.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
 		}
 		go func() {
 			if e := http.Serve(publicListener, publicVolumeMux); e != nil {
-				glog.Fatalf("Volume server fail to serve public: %v", e)
+				log.Fatalf("Volume server fail to serve public: %v", e)
 			}
 		}()
 	}

-	glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
+	log.Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
 	filerListener, e := util.NewListener(
 		*fo.bindIp+":"+strconv.Itoa(*fo.port),
 		time.Duration(10)*time.Second,
 	)
 	if e != nil {
-		glog.Fatalf("Filer listener error: %v", e)
+		log.Fatalf("Filer listener error: %v", e)
 	}

 	// starting grpc server
 	grpcPort := *fo.port + 10000
 	grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
 	if err != nil {
-		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
+		log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
 	}
 	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
 	filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
@@ -191,7 +191,7 @@ func (fo *FilerOptions) startFiler() {
 	httpS := &http.Server{Handler: defaultMux}
 	if err := httpS.Serve(filerListener); err != nil {
-		glog.Fatalf("Filer Fail to serve: %v", e)
+		log.Fatalf("Filer Fail to serve: %v", e)
 	}
 }

weed/command/filer_replication.go (26)

@@ -4,7 +4,7 @@ import (
 	"context"
 	"strings"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/replication"
 	"github.com/chrislusf/seaweedfs/weed/replication/sink"
 	_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
@@ -48,10 +48,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 	for _, input := range sub.NotificationInputs {
 		if config.GetBool("notification." + input.GetName() + ".enabled") {
 			if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
-				glog.Fatalf("Failed to initialize notification input for %s: %+v",
+				log.Fatalf("Failed to initialize notification input for %s: %+v",
 					input.GetName(), err)
 			}
-			glog.V(0).Infof("Configure notification input to %s", input.GetName())
+			log.Infof("Configure notification input to %s", input.GetName())
 			notificationInput = input
 			break
 		}
@@ -69,7 +69,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 			fromDir := config.GetString("source.filer.directory")
 			toDir := config.GetString("sink.filer.directory")
 			if strings.HasPrefix(toDir, fromDir) {
-				glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
+				log.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
 			}
 		}
 	}
@@ -78,10 +78,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 	for _, sk := range sink.Sinks {
 		if config.GetBool("sink." + sk.GetName() + ".enabled") {
 			if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
-				glog.Fatalf("Failed to initialize sink for %s: %+v",
+				log.Fatalf("Failed to initialize sink for %s: %+v",
 					sk.GetName(), err)
 			}
-			glog.V(0).Infof("Configure sink to %s", sk.GetName())
+			log.Infof("Configure sink to %s", sk.GetName())
 			dataSink = sk
 			break
 		}
@@ -100,7 +100,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 	for {
 		key, m, err := notificationInput.ReceiveMessage()
 		if err != nil {
-			glog.Errorf("receive %s: %+v", key, err)
+			log.Errorf("receive %s: %+v", key, err)
 			continue
 		}
 		if key == "" {
@@ -108,16 +108,16 @@ func runFilerReplicate(cmd *Command, args []string) bool {
 			continue
 		}
 		if m.OldEntry != nil && m.NewEntry == nil {
-			glog.V(1).Infof("delete: %s", key)
+			log.Debugf("delete: %s", key)
 		} else if m.OldEntry == nil && m.NewEntry != nil {
-			glog.V(1).Infof("   add: %s", key)
+			log.Debugf("   add: %s", key)
 		} else {
-			glog.V(1).Infof("modify: %s", key)
+			log.Debugf("modify: %s", key)
 		}
 		if err = replicator.Replicate(context.Background(), key, m); err != nil {
-			glog.Errorf("replicate %s: %+v", key, err)
+			log.Errorf("replicate %s: %+v", key, err)
 		} else {
-			glog.V(1).Infof("replicated %s", key)
+			log.Debugf("replicated %s", key)
 		}
 	}
@@ -130,7 +130,7 @@ func validateOneEnabledInput(config *viper.Viper) {
 		if enabledInput == "" {
 			enabledInput = input.GetName()
 		} else {
-			glog.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
+			log.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
 		}
 	}
 }

weed/command/filer_sync.go (10)

@@ -4,7 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/replication"
@@ -89,7 +89,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 			err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
 				*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDebug)
 			if err != nil {
-				glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
+				log.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
 				time.Sleep(1747 * time.Millisecond)
 			}
 		}
@@ -101,7 +101,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
 			err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
 				*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDebug)
 			if err != nil {
-				glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
+				log.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
 				time.Sleep(2147 * time.Millisecond)
 			}
 		}
@@ -134,7 +134,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
 		return err
 	}

-	glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
+	log.Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)

 	// create filer sink
 	filerSource := &source.FilerSource{}
@@ -264,7 +264,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
 		counter++
 		if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
-			glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
+			log.Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
 			counter = 0
 			lastWriteTime = time.Now()
 			if err := writeSyncOffset(grpcDialOption, targetFiler, sourceFilerSignature, resp.TsNs); err != nil {

weed/command/fix.go (12)

@@ -5,7 +5,7 @@ import (
 	"path"
 	"strconv"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@@ -47,12 +47,12 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
 }

 func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
-	glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
+	log.Debugf("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
 	if n.Size.IsValid() {
 		pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
-		glog.V(2).Infof("saved %d with error %v", n.Size, pe)
+		log.Debugf("saved %d with error %v", n.Size, pe)
 	} else {
-		glog.V(2).Infof("skipping deleted file ...")
+		log.Debugf("skipping deleted file ...")
 		return scanner.nm.Delete(n.Id)
 	}
 	return nil
@@ -79,12 +79,12 @@ func runFix(cmd *Command, args []string) bool {
 	}

 	if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil {
-		glog.Fatalf("scan .dat File: %v", err)
+		log.Fatalf("scan .dat File: %v", err)
 		os.Remove(indexFileName)
 	}

 	if err := nm.SaveToIdx(indexFileName); err != nil {
-		glog.Fatalf("save to .idx File: %v", err)
+		log.Fatalf("save to .idx File: %v", err)
 		os.Remove(indexFileName)
 	}

weed/command/master.go (20)

@@ -14,7 +14,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util/grace"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -94,7 +94,7 @@ func runMaster(cmd *Command, args []string) bool {
 		os.MkdirAll(*m.metaFolder, 0755)
 	}
 	if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil {
-		glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
+		log.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
 	}

 	var masterWhiteList []string
@@ -102,7 +102,7 @@ func runMaster(cmd *Command, args []string) bool {
 		masterWhiteList = strings.Split(*m.whiteList, ",")
 	}
 	if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
-		glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
+		log.Fatalf("volumeSizeLimitMB should be smaller than 30000")
 	}

 	startMaster(m, masterWhiteList)
@@ -119,16 +119,16 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 	r := mux.NewRouter()
 	ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
 	listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
-	glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
+	log.Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
 	masterListener, e := util.NewListener(listeningAddress, 0)
 	if e != nil {
-		glog.Fatalf("Master startup error: %v", e)
+		log.Fatalf("Master startup error: %v", e)
 	}
 	// start raftServer
 	raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
 		peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState)
 	if raftServer == nil {
-		glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
+		log.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
 	}
 	ms.SetRaftServer(raftServer)
 	r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
@@ -136,14 +136,14 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 	grpcPort := *masterOption.port + 10000
 	grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
 	if err != nil {
-		glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
+		log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
 	}
 	// Create your protocol servers.
 	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
 	master_pb.RegisterSeaweedServer(grpcS, ms)
 	protobuf.RegisterRaftServer(grpcS, raftServer)
 	reflection.Register(grpcS)
-	glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
+	log.Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
 	go grpcS.Serve(grpcL)

 	go func() {
@@ -165,7 +165,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
 }

 func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
-	glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
+	log.Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
 	masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
 	if peers != "" {
 		cleanedPeers = strings.Split(peers, ",")
@@ -183,7 +183,7 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
 		cleanedPeers = append(cleanedPeers, masterAddress)
 	}
 	if len(cleanedPeers)%2 == 0 {
-		glog.Fatalf("Only odd number of masters are supported!")
+		log.Fatalf("Only odd number of masters are supported!")
 	}
 	return
 }

weed/command/mount_std.go (14)

@@ -19,7 +19,7 @@ import (
 	"github.com/seaweedfs/fuse/fs"

 	"github.com/chrislusf/seaweedfs/weed/filesys"
-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
@@ -54,7 +54,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 	// parse filer grpc address
 	filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
 	if err != nil {
-		glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
+		log.Infof("ParseFilerGrpcAddress: %v", err)
 		return true
 	}
@@ -70,7 +70,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		return nil
 	})
 	if err != nil {
-		glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
+		log.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
 		return true
 	}
@@ -130,7 +130,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 	// Ensure target mount point availability
 	if isValid := checkMountPointAvailable(dir); !isValid {
-		glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
+		log.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
 		return true
 	}
@@ -194,7 +194,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 	// mount
 	c, err := fuse.Mount(dir, options...)
 	if err != nil {
-		glog.V(0).Infof("mount: %v", err)
+		log.Infof("mount: %v", err)
 		return true
 	}
 	defer fuse.Unmount(dir)
@@ -204,13 +204,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		c.Close()
 	})

-	glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
+	log.Infof("mounted %s%s to %s", filer, mountRoot, dir)
 	err = fs.Serve(c, seaweedFileSystem)

 	// check if the mount process has an error to report
 	<-c.Ready
 	if err := c.MountError; err != nil {
-		glog.V(0).Infof("mount process: %v", err)
+		log.Infof("mount process: %v", err)
 		return true
 	}

weed/command/msg_broker.go (10)

@@ -10,7 +10,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util/grace"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/messaging/broker"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -65,7 +65,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {

 	filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
 	if err != nil {
-		glog.Fatal(err)
+		log.Fatal(err)
 		return false
 	}
@@ -82,10 +82,10 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+			log.Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
 			time.Sleep(time.Second)
 		} else {
-			glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
+			log.Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
 			break
 		}
 	}
@@ -102,7 +102,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
 	// start grpc listener
 	grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
 	if err != nil {
-		glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
+		log.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
 	}
 	grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
 	messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)

weed/command/s3.go (22)

@@ -12,7 +12,7 @@ import (
 	"github.com/gorilla/mux"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/s3api"
 	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/util"
@@ -137,7 +137,7 @@ func (s3opt *S3Options) startS3Server() bool {

 	filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
 	if err != nil {
-		glog.Fatal(err)
+		log.Fatal(err)
 		return false
 	}
@@ -157,14 +157,14 @@ func (s3opt *S3Options) startS3Server() bool {
 			}
 			filerBucketsPath = resp.DirBuckets
 			metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
-			glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
+			log.Infof("S3 read filer buckets dir: %s", filerBucketsPath)
 			return nil
 		})
 		if err != nil {
-			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+			log.Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
 			time.Sleep(time.Second)
 		} else {
-			glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
+			log.Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
 			break
 		}
 	}
@@ -183,7 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
 		GrpcDialOption: grpcDialOption,
 	})
 	if s3ApiServer_err != nil {
-		glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
+		log.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
 	}

 	httpS := &http.Server{Handler: router}
@@ -191,18 +191,18 @@ func (s3opt *S3Options) startS3Server() bool {
 	listenAddress := fmt.Sprintf(":%d", *s3opt.port)
 	s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
 	if err != nil {
-		glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
+		log.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
 	}

 	if *s3opt.tlsPrivateKey != "" {
-		glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
+		log.Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
 		if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
-			glog.Fatalf("S3 API Server Fail to serve: %v", err)
+			log.Fatalf("S3 API Server Fail to serve: %v", err)
 		}
 	} else {
-		glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
+		log.Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
 		if err = httpS.Serve(s3ApiListener); err != nil {
-			glog.Fatalf("S3 API Server Fail to serve: %v", err)
+			log.Fatalf("S3 API Server Fail to serve: %v", err)
 		}
 	}

weed/command/server.go (8)

@@ -10,7 +10,7 @@ import (
 	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -124,7 +124,7 @@ func runServer(cmd *Command, args []string) bool {
 	if *serverOptions.cpuprofile != "" {
 		f, err := os.Create(*serverOptions.cpuprofile)
 		if err != nil {
-			glog.Fatal(err)
+			log.Fatal(err)
 		}
 		pprof.StartCPUProfile(f)
 		defer pprof.StopCPUProfile()
@@ -175,14 +175,14 @@ func runServer(cmd *Command, args []string) bool {
 	folders := strings.Split(*volumeDataFolders, ",")

 	if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
-		glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
+		log.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
 	}

 	if *masterOptions.metaFolder == "" {
 		*masterOptions.metaFolder = folders[0]
 	}
 	if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {
-		glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
+		log.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
 	}
 	filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder

weed/command/volume.go (44)

@@ -22,7 +22,7 @@ import (
 	"google.golang.org/grpc/reflection"

-	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util/log"
 	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/chrislusf/seaweedfs/weed/server"
 	stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
@@ -125,7 +125,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	v.folders = strings.Split(volumeFolders, ",")
 	for _, folder := range v.folders {
 		if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
-			glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
+			log.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
 		}
 	}
@@ -135,7 +135,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		if max, e := strconv.Atoi(maxString); e == nil {
 			v.folderMaxLimits = append(v.folderMaxLimits, max)
 		} else {
-			glog.Fatalf("The max specified in -max not a valid number %s", maxString)
+			log.Fatalf("The max specified in -max not a valid number %s", maxString)
 		}
 	}
 	if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
@@ -144,7 +144,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		}
 	}
 	if len(v.folders) != len(v.folderMaxLimits) {
-		glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
+		log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
 	}

 	// set minFreeSpacePercent
@@ -153,7 +153,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		if value, e := strconv.ParseFloat(freeString, 32); e == nil {
 			v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
 		} else {
-			glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
+			log.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
 		}
 	}
 	if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
@@ -162,7 +162,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		}
 	}
 	if len(v.folders) != len(v.minFreeSpacePercents) {
-		glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
+		log.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
 	}

 	// security related white list configuration
@@ -172,7 +172,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	if *v.ip == "" {
 		*v.ip = util.DetectedHostAddress()
-		glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
+		log.Infof("detected volume server ip address: %v", *v.ip)
 	}

 	if *v.publicPort == 0 {
@@ -226,7 +226,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	if v.isSeparatedPublicPort() {
 		publicHttpDown = v.startPublicHttpService(publicVolumeMux)
 		if nil == publicHttpDown {
-			glog.Fatalf("start public http service failed")
+			log.Fatalf("start public http service failed")
 		}
 	}
@@ -239,7 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	// Stop heartbeats
 	if !volumeServer.StopHeartbeat() {
-		glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
+		log.Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
 		time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second) time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
} }
@ -257,18 +257,18 @@ func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server,
// firstly, stop the public http service to prevent from receiving new user request // firstly, stop the public http service to prevent from receiving new user request
if nil != publicHttpDown { if nil != publicHttpDown {
glog.V(0).Infof("stop public http server ... ")
log.Infof("stop public http server ... ")
if err := publicHttpDown.Stop(); err != nil { if err := publicHttpDown.Stop(); err != nil {
glog.Warningf("stop the public http server failed, %v", err)
log.Warnf("stop the public http server failed, %v", err)
} }
} }
glog.V(0).Infof("graceful stop cluster http server ... ")
log.Infof("graceful stop cluster http server ... ")
if err := clusterHttpServer.Stop(); err != nil { if err := clusterHttpServer.Stop(); err != nil {
glog.Warningf("stop the cluster http server failed, %v", err)
log.Warnf("stop the cluster http server failed, %v", err)
} }
glog.V(0).Infof("graceful stop gRPC ...")
log.Infof("graceful stop gRPC ...")
grpcS.GracefulStop() grpcS.GracefulStop()
volumeServer.Shutdown() volumeServer.Shutdown()
@ -286,14 +286,14 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
grpcPort := *v.port + 10000 grpcPort := *v.port + 10000
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0) grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil { if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
} }
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs) volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS) reflection.Register(grpcS)
go func() { go func() {
if err := grpcS.Serve(grpcL); err != nil { if err := grpcS.Serve(grpcL); err != nil {
glog.Fatalf("start gRPC service failed, %s", err)
log.Fatalf("start gRPC service failed, %s", err)
} }
}() }()
return grpcS return grpcS
@ -301,17 +301,17 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
log.Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil { if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
log.Fatalf("Volume server listener error:%v", e)
} }
pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute} pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener) publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
go func() { go func() {
if err := publicHttpDown.Wait(); err != nil { if err := publicHttpDown.Wait(); err != nil {
glog.Errorf("public http down wait failed, %v", err)
log.Errorf("public http down wait failed, %v", err)
} }
}() }()
@ -328,10 +328,10 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
} }
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
log.Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil { if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
log.Fatalf("Volume server listener error:%v", e)
} }
httpDown := httpdown.HTTP{ httpDown := httpdown.HTTP{
@ -342,7 +342,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener) clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
go func() { go func() {
if e := clusterHttpServer.Wait(); e != nil { if e := clusterHttpServer.Wait(); e != nil {
glog.Fatalf("Volume server fail to serve: %v", e)
log.Fatalf("Volume server fail to serve: %v", e)
} }
}() }()
return clusterHttpServer return clusterHttpServer

4
weed/command/volume_test.go

@@ -5,9 +5,9 @@ import (
 "testing"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 )
 func TestXYZ(t *testing.T) {
-glog.V(0).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
+log.Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
 }

22
weed/command/webdav.go

@@ -9,7 +9,7 @@ import (
 "strconv"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/security"
@@ -54,7 +54,7 @@ func runWebDav(cmd *Command, args []string) bool {
 util.LoadConfiguration("security", false)
-glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
+log.Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
 return webDavStandaloneOptions.startWebDav()
@@ -76,7 +76,7 @@ func (wo *WebDavOption) startWebDav() bool {
 // parse filer grpc address
 filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
 if err != nil {
-glog.Fatal(err)
+log.Fatal(err)
 return false
 }
@@ -94,10 +94,10 @@ func (wo *WebDavOption) startWebDav() bool {
 return nil
 })
 if err != nil {
-glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+log.Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
 time.Sleep(time.Second)
 } else {
-glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
+log.Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
 break
 }
 }
@@ -114,7 +114,7 @@ func (wo *WebDavOption) startWebDav() bool {
 CacheSizeMB: *wo.cacheSizeMB,
 })
 if webdavServer_err != nil {
-glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
+log.Fatalf("WebDav Server startup error: %v", webdavServer_err)
 }
 httpS := &http.Server{Handler: ws.Handler}
@@ -122,18 +122,18 @@ func (wo *WebDavOption) startWebDav() bool {
 listenAddress := fmt.Sprintf(":%d", *wo.port)
 webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
 if err != nil {
-glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
+log.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
 }
 if *wo.tlsPrivateKey != "" {
-glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
+log.Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
 if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
-glog.Fatalf("WebDav Server Fail to serve: %v", err)
+log.Fatalf("WebDav Server Fail to serve: %v", err)
 }
 } else {
-glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
+log.Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
 if err = httpS.Serve(webDavListener); err != nil {
-glog.Fatalf("WebDav Server Fail to serve: %v", err)
+log.Fatalf("WebDav Server Fail to serve: %v", err)
 }
 }

8
weed/filer/abstract_sql/abstract_sql_store.go

@@ -5,7 +5,7 @@ import (
 "database/sql"
 "fmt"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 "strings"
@@ -81,7 +81,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 }
 // now the insert failed possibly due to duplication constraints
-glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
+log.Debugf("insert %s falls back to update: %v", entry.FullPath, err)
 res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
 if err != nil {
@@ -187,7 +187,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 var name string
 var data []byte
 if err = rows.Scan(&name, &data); err != nil {
-glog.V(0).Infof("scan %s : %v", fullpath, err)
+log.Infof("scan %s : %v", fullpath, err)
 return nil, fmt.Errorf("scan %s: %v", fullpath, err)
 }
@@ -195,7 +195,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 FullPath: util.NewFullPath(string(fullpath), name),
 }
 if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
-glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+log.Infof("scan decode %s : %v", entry.FullPath, err)
 return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
 }

4
weed/filer/abstract_sql/abstract_sql_store_kv.go

@@ -8,7 +8,7 @@ import (
 "strings"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -24,7 +24,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
 }
 // now the insert failed possibly due to duplication constraints
-glog.V(1).Infof("kv insert falls back to update: %s", err)
+log.Debugf("kv insert falls back to update: %s", err)
 res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
 if err != nil {

8
weed/filer/cassandra/cassandra_store.go

@@ -6,7 +6,7 @@ import (
 "github.com/gocql/gocql"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -42,7 +42,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
 store.cluster.Consistency = gocql.LocalQuorum
 store.session, err = store.cluster.CreateSession()
 if err != nil {
-glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
+log.Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
 }
 return
 }
@@ -155,13 +155,13 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
 }
 if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
 err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.Infof("list %s : %v", entry.FullPath, err)
 break
 }
 entries = append(entries, entry)
 }
 if err := iter.Close(); err != nil {
-glog.V(0).Infof("list iterator close: %v", err)
+log.Infof("list iterator close: %v", err)
 }
 return entries, err

8
weed/filer/configuration.go

@@ -3,7 +3,7 @@ package filer
 import (
 "os"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/spf13/viper"
 )
@@ -18,11 +18,11 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
 for _, store := range Stores {
 if config.GetBool(store.GetName() + ".enabled") {
 if err := store.Initialize(config, store.GetName()+"."); err != nil {
-glog.Fatalf("Failed to initialize store for %s: %+v",
+log.Fatalf("Failed to initialize store for %s: %+v",
 store.GetName(), err)
 }
 f.SetStore(store)
-glog.V(0).Infof("Configure filer for %s", store.GetName())
+log.Infof("Configure filer for %s", store.GetName())
 return
 }
 }
@@ -43,7 +43,7 @@ func validateOneEnabledStore(config *viper.Viper) {
 if enabledStore == "" {
 enabledStore = store.GetName()
 } else {
-glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
+log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
 }
 }
 }
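The LoadConfiguration and validateOneEnabledStore hunks iterate a package-level Stores slice that this diff never populates. Here is a self-contained sketch of the registration pattern they imply, under the assumption (not shown in this commit) that each store package registers itself from an init() function; all names below are illustrative, and the real FilerStore interface has many more methods (Initialize, InsertEntry, ...) elided here.

    package main

    import "fmt"

    type FilerStore interface {
        GetName() string
    }

    var Stores []FilerStore

    type ExampleStore struct{}

    func (s *ExampleStore) GetName() string { return "example" }

    // init runs at import time, so merely importing a store package makes
    // it a candidate for the "<name>.enabled" check in the filer config.
    func init() {
        Stores = append(Stores, &ExampleStore{})
    }

    func main() {
        for _, s := range Stores {
            fmt.Println("registered store:", s.GetName())
        }
    }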

20
weed/filer/elastic/v7/elastic_store.go

@@ -7,7 +7,7 @@ import (
 "strings"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 weed_util "github.com/chrislusf/seaweedfs/weed/util"
 jsoniter "github.com/json-iterator/go"
@@ -67,7 +67,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre
 if store.maxPageSize <= 0 {
 store.maxPageSize = 10000
 }
-glog.Infof("filer store elastic endpoints: %v.", servers)
+log.Infof("filer store elastic endpoints: %v.", servers)
 return store.initialize(options)
 }
@@ -110,7 +110,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 }
 value, err := jsoniter.Marshal(esEntry)
 if err != nil {
-glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
 return fmt.Errorf("insert entry %v.", err)
 }
 _, err = store.client.Index().
@@ -120,7 +120,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 BodyJson(string(value)).
 Do(ctx)
 if err != nil {
-glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
 return fmt.Errorf("insert entry %v.", err)
 }
 return nil
@@ -149,7 +149,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 err := jsoniter.Unmarshal(searchResult.Source, esEntry)
 return esEntry.Entry, err
 }
-glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+log.Errorf("find entry(%s),%v.", string(fullpath), err)
 return nil, filer_pb.ErrNotFound
 }
@@ -167,7 +167,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
 if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
 return nil
 }
-glog.Errorf("delete index(%s) %v.", index, err)
+log.Errorf("delete index(%s) %v.", index, err)
 return err
 }
@@ -182,7 +182,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
 return nil
 }
 }
-glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
 return fmt.Errorf("delete entry %v.", err)
 }
@@ -207,7 +207,7 @@ func (store *ElasticStore) ListDirectoryEntries(
 func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
 indexResult, err := store.client.CatIndices().Do(ctx)
 if err != nil {
-glog.Errorf("list indices %v.", err)
+log.Errorf("list indices %v.", err)
 return entries, err
 }
 for _, index := range indexResult {
@@ -249,7 +249,7 @@ func (store *ElasticStore) listDirectoryEntries(
 result := &elastic.SearchResult{}
 if (startFileName == "" && first) || inclusive {
 if result, err = store.search(ctx, index, parentId); err != nil {
-glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 return entries, err
 }
 } else {
@@ -259,7 +259,7 @@ func (store *ElasticStore) listDirectoryEntries(
 }
 after := weed_util.Md5String([]byte(fullPath))
 if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
-glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 return entries, err
 }
 }

8
weed/filer/elastic/v7/elastic_store_kv.go

@@ -6,7 +6,7 @@ import (
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 jsoniter "github.com/json-iterator/go"
 elastic "github.com/olivere/elastic/v7"
 )
@@ -22,7 +22,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
 return nil
 }
 }
-glog.Errorf("delete key(id:%s) %v.", string(key), err)
+log.Errorf("delete key(id:%s) %v.", string(key), err)
 return fmt.Errorf("delete key %v.", err)
 }
@@ -41,7 +41,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
 return esEntry.Value, nil
 }
 }
-glog.Errorf("find key(%s),%v.", string(key), err)
+log.Errorf("find key(%s),%v.", string(key), err)
 return value, filer.ErrKvNotFound
 }
@@ -49,7 +49,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
 esEntry := &ESKVEntry{value}
 val, err := jsoniter.Marshal(esEntry)
 if err != nil {
-glog.Errorf("insert key(%s) %v.", string(key), err)
+log.Errorf("insert key(%s) %v.", string(key), err)
 return fmt.Errorf("insert key %v.", err)
 }
 _, err = store.client.Index().

6
weed/filer/etcd/etcd_store.go

@@ -9,7 +9,7 @@ import (
 "go.etcd.io/etcd/clientv3"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -45,7 +45,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
 }
 func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
-glog.Infof("filer store etcd: %s", servers)
+log.Infof("filer store etcd: %s", servers)
 to, err := time.ParseDuration(timeout)
 if err != nil {
@@ -169,7 +169,7 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
 }
 if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
 err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.Infof("list %s : %v", entry.FullPath, err)
 break
 }
 entries = append(entries, entry)

8
weed/filer/filechunk_manifest.go

@@ -9,7 +9,7 @@ import (
 "github.com/golang/protobuf/proto"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -87,7 +87,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
 func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
 urlStrings, err := lookupFileIdFn(fileId)
 if err != nil {
-glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
 return nil, err
 }
 return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
@@ -108,14 +108,14 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
 break
 }
 if err != nil {
-glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+log.Infof("read %s failed, err: %v", urlString, err)
 buffer.Reset()
 } else {
 break
 }
 }
 if err != nil && shouldRetry {
-glog.V(0).Infof("retry reading in %v", waitTime)
+log.Infof("retry reading in %v", waitTime)
 time.Sleep(waitTime)
 } else {
 break

14
weed/filer/filechunks.go

@@ -158,9 +158,9 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
 func logPrintf(name string, visibles []VisibleInterval) {
 /*
-glog.V(0).Infof("%s len %d", name, len(visibles))
+log.Infof("%s len %d", name, len(visibles))
 for _, v := range visibles {
-glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
+log.Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
 }
 */
 }
@@ -185,22 +185,22 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
 }
 logPrintf(" before", visibles)
-// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
+// log.Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
 chunkStop := chunk.Offset + int64(chunk.Size)
 for _, v := range visibles {
 if v.start < chunk.Offset && chunk.Offset < v.stop {
 t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
 newVisibles = append(newVisibles, t)
-// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+// log.Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 }
 if v.start < chunkStop && chunkStop < v.stop {
 t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
 newVisibles = append(newVisibles, t)
-// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+// log.Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 }
 if chunkStop <= v.start || v.stop <= chunk.Offset {
 newVisibles = append(newVisibles, v)
-// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
+// log.Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
 }
 }
 newVisibles = append(newVisibles, newV)
@@ -240,7 +240,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chu
 for _, chunk := range chunks {
-// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+// log.Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
 visibles = MergeIntoVisibles(visibles, chunk)
 logPrintf("add", visibles)

4
weed/filer/filechunks2_test.go

@@ -4,7 +4,7 @@ import (
 "sort"
 "testing"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -41,6 +41,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
 return chunks[i].Offset < chunks[j].Offset
 })
 for _, chunk := range chunks {
-glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+log.Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 }
 }

40
weed/filer/filer.go

@@ -9,7 +9,7 @@ import (
 "google.golang.org/grpc"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -93,14 +93,14 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
 storeIdBytes = make([]byte, 4)
 util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
 if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
-glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
+log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
 }
-glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
+log.Infof("create %s to %d", FilerStoreId, f.Signature)
 } else if err == nil && len(storeIdBytes) == 4 {
 f.Signature = int32(util.BytesToUint32(storeIdBytes))
-glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
+log.Infof("existing %s = %d", FilerStoreId, f.Signature)
 } else {
-glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
+log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
 }
 }
@@ -145,7 +145,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 // fmt.Printf("%d directory: %+v\n", i, dirPath)
 // check the store directly
-glog.V(4).Infof("find uncached directory: %s", dirPath)
+log.Tracef("find uncached directory: %s", dirPath)
 dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
 // no such existing directory
@@ -169,11 +169,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 },
 }
-glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+log.Debugf("create directory: %s %v", dirPath, dirEntry.Mode)
 mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
 if mkdirErr != nil {
 if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
-glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+log.Tracef("mkdir %s: %v", dirPath, mkdirErr)
 return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
 }
 } else {
@@ -182,7 +182,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 }
 } else if !dirEntry.IsDirectory() {
-glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
 return fmt.Errorf("%s is a file", dirPath)
 }
@@ -194,13 +194,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 }
 if lastDirectoryEntry == nil {
-glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
+log.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
 return fmt.Errorf("parent folder not found: %v", entry.FullPath)
 }
 /*
 if !hasWritePermission(lastDirectoryEntry, entry) {
-glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
+log.Infof("directory %s: %v, entry: uid=%d gid=%d",
 lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
 return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
 }
@@ -209,19 +209,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
 if oldEntry == nil {
-glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+log.Tracef("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
 if err := f.Store.InsertEntry(ctx, entry); err != nil {
-glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+log.Errorf("insert entry %s: %v", entry.FullPath, err)
 return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
 }
 } else {
 if o_excl {
-glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+log.Tracef("EEXIST: entry %s already exists", entry.FullPath)
 return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
 }
-glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+log.Tracef("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
 if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
-glog.Errorf("update entry %s: %v", entry.FullPath, err)
+log.Errorf("update entry %s: %v", entry.FullPath, err)
 return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
 }
 }
@@ -231,7 +231,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 f.deleteChunksIfNotNew(oldEntry, entry)
-glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+log.Tracef("CreateEntry %s: created", entry.FullPath)
 return nil
 }
@@ -239,11 +239,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
 if oldEntry != nil {
 if oldEntry.IsDirectory() && !entry.IsDirectory() {
-glog.Errorf("existing %s is a directory", entry.FullPath)
+log.Errorf("existing %s is a directory", entry.FullPath)
 return fmt.Errorf("existing %s is a directory", entry.FullPath)
 }
 if !oldEntry.IsDirectory() && entry.IsDirectory() {
-glog.Errorf("existing %s is a file", entry.FullPath)
+log.Errorf("existing %s is a file", entry.FullPath)
 return fmt.Errorf("existing %s is a file", entry.FullPath)
 }
 }
@@ -321,7 +321,7 @@ func (f *Filer) Shutdown() {
 func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
 for _, hardLinkId := range hardLinkIds {
 if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
-glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+log.Errorf("delete hard link id %d : %v", hardLinkId, err)
 }
 }
 }

6
weed/filer/filer_buckets.go

@@ -5,7 +5,7 @@ import (
 "math"
 "sync"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -32,7 +32,7 @@ func (f *Filer) LoadBuckets() {
 entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
 if err != nil {
-glog.V(1).Infof("no buckets found: %v", err)
+log.Debugf("no buckets found: %v", err)
 return
 }
@@ -41,7 +41,7 @@ func (f *Filer) LoadBuckets() {
 shouldFsyncMap[bucket] = true
 }
-glog.V(1).Infof("buckets found: %d", len(entries))
+log.Debugf("buckets found: %d", len(entries))
 f.buckets.Lock()
 for _, entry := range entries {

10
weed/filer/filer_conf.go

@@ -5,7 +5,7 @@ import (
 "context"
 "io"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/jsonpb"
@@ -36,7 +36,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 if err == filer_pb.ErrNotFound {
 return nil
 }
-glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+log.Errorf("read filer conf entry %s: %v", filerConfPath, err)
 return
 }
@@ -46,7 +46,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
 data, err := filer.readEntry(chunks)
 if err != nil {
-glog.Errorf("read filer conf content: %v", err)
+log.Errorf("read filer conf content: %v", err)
 return
 }
@@ -60,7 +60,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
 err = proto.UnmarshalText(string(data), conf)
 if err != nil {
-glog.Errorf("unable to parse filer conf: %v", err)
+log.Errorf("unable to parse filer conf: %v", err)
 // this is not recoverable
 return nil
 }
@@ -85,7 +85,7 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
 func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
 err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
 if err != nil {
-glog.Errorf("put location prefix: %v", err)
+log.Errorf("put location prefix: %v", err)
 }
 return
 }

14
weed/filer/filer_delete_entry.go

@@ -4,7 +4,7 @@ import (
 "context"
 "fmt"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
@@ -33,7 +33,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 var dirHardLinkIds []HardLinkId
 dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster, signatures)
 if err != nil {
-glog.V(0).Infof("delete directory %s: %v", p, err)
+log.Infof("delete directory %s: %v", p, err)
 return fmt.Errorf("delete directory %s: %v", p, err)
 }
 chunks = append(chunks, dirChunks...)
@@ -71,12 +71,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 for {
 entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
 if err != nil {
-glog.Errorf("list folder %s: %v", entry.FullPath, err)
+log.Errorf("list folder %s: %v", entry.FullPath, err)
 return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
 }
 if lastFileName == "" && !isRecursive && len(entries) > 0 {
 // only for first iteration in the loop
-glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+log.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
 return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
 }
@@ -107,7 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 }
 }
-glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+log.Tracef("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
 if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
 return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -120,7 +120,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
-glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+log.Tracef("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
 if storeDeletionErr := f.Store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
 return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -139,7 +139,7 @@ func (f *Filer) doDeleteCollection(collectionName string) (err error) {
 Name: collectionName,
 })
 if err != nil {
-glog.Infof("delete collection %s: %v", collectionName, err)
+log.Infof("delete collection %s: %v", collectionName, err)
 }
 return err
 })

8
weed/filer/filer_deletion.go

@@ -4,7 +4,7 @@ import (
 "strings"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/operation"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -54,10 +54,10 @@ func (f *Filer) loopProcessingDeletion() {
 _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
 if err != nil {
 if !strings.Contains(err.Error(), "already deleted") {
-glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+log.Infof("deleting fileIds len=%d error: %v", deletionCount, err)
 }
 } else {
-glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+log.Debugf("deleting fileIds len=%d", deletionCount)
 }
 }
 })
@@ -76,7 +76,7 @@ func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
 }
 dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
 if manifestResolveErr != nil {
-glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 }
 for _, dChunk := range dataChunks {
 f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())

8
weed/filer/filer_notify.go

@@ -9,7 +9,7 @@ import (
 "github.com/golang/protobuf/proto"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
@@ -54,7 +54,7 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
 }
 if notification.Queue != nil {
-glog.V(3).Infof("notifying entry update %v", fullpath)
+log.Tracef("notifying entry update %v", fullpath)
 notification.Queue.SendMessage(fullpath, eventNotification)
 }
@@ -73,7 +73,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
 }
 data, err := proto.Marshal(event)
 if err != nil {
-glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
 return
 }
@@ -96,7 +96,7 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
 for {
 if err := f.appendToFile(targetFile, buf); err != nil {
-glog.V(1).Infof("log write failed %s: %v", targetFile, err)
+log.Debugf("log write failed %s: %v", targetFile, err)
 time.Sleep(737 * time.Millisecond)
 } else {
 break

8
weed/filer/filer_on_meta_event.go

@@ -4,7 +4,7 @@ import (
 "bytes"
 "math"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -22,7 +22,7 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse)
 return
 }
-glog.V(0).Infof("procesing %v", event)
+log.Infof("procesing %v", event)
 if entry.Name == FilerConfName {
 f.reloadFilerConfiguration(entry)
 }
@@ -42,7 +42,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
 fc := NewFilerConf()
 err := fc.loadFromChunks(f, entry.Chunks)
 if err != nil {
-glog.Errorf("read filer conf chunks: %v", err)
+log.Errorf("read filer conf chunks: %v", err)
 return
 }
 f.FilerConf = fc
@@ -54,7 +54,7 @@ func (f *Filer) LoadFilerConf() {
 return fc.loadFromFiler(f)
 })
 if err != nil {
-glog.Errorf("read filer conf: %v", err)
+log.Errorf("read filer conf: %v", err)
 return
 }
 f.FilerConf = fc

6
weed/filer/filerstore_hardlink.go

@@ -4,7 +4,7 @@ import (
 "bytes"
 "context"
 "fmt"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -54,12 +54,12 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
 value, err := fsw.KvGet(ctx, key)
 if err != nil {
-glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 return err
 }
 if err = entry.DecodeAttributesAndChunks(value); err != nil {
-glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 return err
 }

8
weed/filer/leveldb/leveldb_store.go

@@ -10,7 +10,7 @@ import (
 leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -37,7 +37,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre
 }
 func (store *LevelDBStore) initialize(dir string) (err error) {
-glog.Infof("filer store dir: %s", dir)
+log.Infof("filer store dir: %s", dir)
 if err := weed_util.TestFolderWritable(dir); err != nil {
 return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
 }
@@ -53,7 +53,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
 store.db, err = leveldb.RecoverFile(dir, opts)
 }
 if err != nil {
-glog.Infof("filer store open dir %s: %v", dir, err)
+log.Infof("filer store open dir %s: %v", dir, err)
 return
 }
 }
@@ -193,7 +193,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
 }
 if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.Infof("list %s : %v", entry.FullPath, err)
 break
 }
 entries = append(entries, entry)

8
weed/filer/leveldb2/leveldb2_store.go

@@ -13,7 +13,7 @@ import (
 "os"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 weed_util "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -37,7 +37,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr
 }
 func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
-glog.Infof("filer store leveldb2 dir: %s", dir)
+log.Infof("filer store leveldb2 dir: %s", dir)
 if err := weed_util.TestFolderWritable(dir); err != nil {
 return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
 }
@@ -56,7 +56,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
 db, dbErr = leveldb.RecoverFile(dbFolder, opts)
 }
 if dbErr != nil {
-glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
+log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
 return dbErr
 }
 store.dbs = append(store.dbs, db)
@@ -205,7 +205,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
 // println("list", entry.FullPath, "chunks", len(entry.Chunks))
 if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 err = decodeErr
-glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+log.Infof("list %s : %v", entry.FullPath, err)
 break
 }
 entries = append(entries, entry)

24
weed/filer/meta_aggregator.go

@@ -11,7 +11,7 @@ import (
     "github.com/golang/protobuf/proto"
     "google.golang.org/grpc"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@@ -64,7 +64,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
     peerSignature, err := ma.readFilerStoreSignature(peer)
     for err != nil {
-        glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
+        log.Infof("connecting to peer filer %s: %v", peer, err)
         time.Sleep(1357 * time.Millisecond)
         peerSignature, err = ma.readFilerStoreSignature(peer)
     }
@@ -74,27 +74,27 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
         lastTsNs = prevTsNs
     }
-    glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
+    log.Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
     var counter int64
     var synced bool
     maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
         if err := Replay(f.Store, event); err != nil {
-            glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
+            log.Errorf("failed to reply metadata change from %v: %v", peer, err)
             return
         }
         counter++
         if lastPersistTime.Add(time.Minute).Before(time.Now()) {
             if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
                 if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
-                    glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
+                    log.Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
                 } else if !synced {
                     synced = true
-                    glog.V(0).Infof("synced with %s", peer)
+                    log.Infof("synced with %s", peer)
                 }
                 lastPersistTime = time.Now()
                 counter = 0
             } else {
-                glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
+                log.Infof("failed to update offset for %v: %v", peer, err)
             }
         }
     }
@@ -103,7 +103,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
     processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
         data, err := proto.Marshal(event)
         if err != nil {
-            glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+            log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
             return err
         }
         dir := event.Directory
@@ -147,7 +147,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
         }
     })
     if err != nil {
-        glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
+        log.Infof("subscribing remote %s meta change: %v", peer, err)
         time.Sleep(1733 * time.Millisecond)
     }
 }
@@ -177,7 +177,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
     value, err := f.Store.KvGet(context.Background(), key)
     if err == ErrKvNotFound {
-        glog.Warningf("readOffset %s not found", peer)
+        log.Warnf("readOffset %s not found", peer)
         return 0, nil
     }
@@ -187,7 +187,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
     lastTsNs = int64(util.BytesToUint64(value))
-    glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
+    log.Infof("readOffset %s : %d", peer, lastTsNs)
     return
 }
@@ -206,7 +206,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int3
         return fmt.Errorf("updateOffset %s : %v", peer, err)
     }
-    glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
+    log.Tracef("updateOffset %s : %d", peer, lastTsNs)
     return
 }

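For orientation, a minimal sketch of what a util/log facade could look like, assuming a sirupsen/logrus backend; this is an illustration under that assumption, not the committed weed/util/log source:

    package log // hypothetical sketch, not the actual file

    import "github.com/sirupsen/logrus"

    func init() {
        // logrus can attach the calling file and line number to each entry;
        // note that behind a thin wrapper like this it would report the
        // wrapper's own frame, so a real facade needs extra frame skipping.
        logrus.SetReportCaller(true)
        logrus.SetLevel(logrus.TraceLevel)
    }

    func Tracef(format string, args ...interface{}) { logrus.Tracef(format, args...) }
    func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...) }
    func Infof(format string, args ...interface{})  { logrus.Infof(format, args...) }
    func Warnf(format string, args ...interface{})  { logrus.Warnf(format, args...) }
    func Errorf(format string, args ...interface{}) { logrus.Errorf(format, args...) }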
6
weed/filer/meta_replay.go

@@ -3,7 +3,7 @@ package filer
 import (
     "context"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -14,7 +14,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
     var newEntry *Entry
     if message.OldEntry != nil {
         oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
-        glog.V(4).Infof("deleting %v", oldPath)
+        log.Tracef("deleting %v", oldPath)
         if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
             return err
         }
@@ -26,7 +26,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
         dir = message.NewParentPath
     }
     key := util.NewFullPath(dir, message.NewEntry.Name)
-    glog.V(4).Infof("creating %v", key)
+    log.Tracef("creating %v", key)
     newEntry = FromPbEntry(dir, message.NewEntry)
     if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
         return err

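For illustration, a rename reaches Replay as one event carrying both sides, so the delete and the insert above fire in a single call. Field names follow the visible code; the EventNotification wrapping and the paths are assumptions for the example:

    // hypothetical event: /buckets/a/old.txt renamed to /buckets/b/new.txt
    resp := &filer_pb.SubscribeMetadataResponse{
        Directory: "/buckets/a",
        EventNotification: &filer_pb.EventNotification{
            OldEntry:      &filer_pb.Entry{Name: "old.txt"},
            NewEntry:      &filer_pb.Entry{Name: "new.txt"},
            NewParentPath: "/buckets/b",
        },
    }
    // deletes /buckets/a/old.txt, then inserts /buckets/b/new.txt
    err := Replay(filerStore, resp)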
8
weed/filer/mongodb/mongodb_store.go

@@ -4,7 +4,7 @@ import (
     "context"
     "fmt"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "go.mongodb.org/mongo-driver/bson"
@@ -134,7 +134,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
     var where = bson.M{"directory": dir, "name": name}
     err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
     if err != mongo.ErrNoDocuments && err != nil {
-        glog.Errorf("find %s: %v", fullpath, err)
+        log.Errorf("find %s: %v", fullpath, err)
         return nil, filer_pb.ErrNotFound
     }
@@ -205,7 +205,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
     }
     if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
         err = decodeErr
-        glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+        log.Infof("list %s : %v", entry.FullPath, err)
         break
     }
@@ -213,7 +213,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
     }
     if err := cur.Close(ctx); err != nil {
-        glog.V(0).Infof("list iterator close: %v", err)
+        log.Infof("list iterator close: %v", err)
     }
     return entries, err

4
weed/filer/mongodb/mongodb_store_kv.go

@@ -4,7 +4,7 @@ import (
     "context"
     "fmt"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "go.mongodb.org/mongo-driver/bson"
     "go.mongodb.org/mongo-driver/mongo"
 )
@@ -36,7 +36,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
     var where = bson.M{"directory": dir, "name": name}
     err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
     if err != mongo.ErrNoDocuments && err != nil {
-        glog.Errorf("kv get: %v", err)
+        log.Errorf("kv get: %v", err)
         return nil, filer.ErrKvNotFound
     }

24
weed/filer/reader_at.go

@@ -7,7 +7,7 @@ import (
     "math/rand"
     "sync"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -54,7 +54,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
     locations = resp.LocationsMap[vid]
     if locations == nil || len(locations.Locations) == 0 {
-        glog.V(0).Infof("failed to locate %s", fileId)
+        log.Infof("failed to locate %s", fileId)
         return fmt.Errorf("failed to locate %s", fileId)
     }
     vicCacheLock.Lock()
@@ -101,7 +101,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
     c.readerLock.Lock()
     defer c.readerLock.Unlock()
-    glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
+    log.Tracef("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
     return c.doReadAt(p[n:], offset+int64(n))
 }
@@ -121,7 +121,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
     }
     if startOffset < chunk.LogicOffset {
         gap := int(chunk.LogicOffset - startOffset)
-        glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
+        log.Tracef("zero [%d,%d)", startOffset, startOffset+int64(gap))
         n += int(min(int64(gap), remaining))
         startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
         if remaining <= 0 {
@@ -133,10 +133,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
     if chunkStart >= chunkStop {
         continue
     }
-    glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
+    log.Tracef("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
     buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
     if err != nil {
-        glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
+        log.Errorf("fetching chunk %+v: %v\n", chunk, err)
         return
     }
     bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
@@ -145,11 +145,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
         startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
     }
-    glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
+    log.Tracef("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
     if err == nil && remaining > 0 && c.fileSize > startOffset {
         delta := int(min(remaining, c.fileSize-startOffset))
-        glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
+        log.Tracef("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
         n += delta
     }
@@ -194,11 +194,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
     return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
-        glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
+        log.Tracef("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
         data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
         if data != nil {
-            glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
+            log.Tracef("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
         } else {
             var err error
             data, err = c.doFetchFullChunkData(chunkView)
@@ -213,11 +213,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
 func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
-    glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
+    log.Tracef("+ doFetchFullChunkData %s", chunkView.FileId)
     data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
-    glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
+    log.Tracef("- doFetchFullChunkData %s", chunkView.FileId)
     return data, err

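The fetchGroup.Do call above has the shape of golang.org/x/sync/singleflight, which would make concurrent readers of the same chunk share a single fetch; the field's actual type is not shown in this hunk, so the following standalone sketch is under that assumption (the file id and fetch helper are made up for illustration):

    import "golang.org/x/sync/singleflight"

    var group singleflight.Group // assumed type of c.fetchGroup

    // every concurrent caller keyed on the same file id piggybacks on one fetch
    data, err, shared := group.Do("3,01637037d6", func() (interface{}, error) {
        return fetchWholeChunk() // hypothetical stand-in for doFetchFullChunkData
    })
    _ = shared // true for callers that reused another caller's in-flight result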
4
weed/filer/redis/universal_redis_store.go

@@ -10,7 +10,7 @@ import (
     "github.com/go-redis/redis"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -170,7 +170,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
     path := util.NewFullPath(string(fullpath), fileName)
     entry, err := store.FindEntry(ctx, path)
     if err != nil {
-        glog.V(0).Infof("list %s : %v", path, err)
+        log.Infof("list %s : %v", path, err)
     } else {
         if entry.TtlSec > 0 {
             if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {

4
weed/filer/redis2/universal_redis_store.go

@@ -8,7 +8,7 @@ import (
     "github.com/go-redis/redis"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -149,7 +149,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, ful
     path := util.NewFullPath(string(fullpath), fileName)
     entry, err := store.FindEntry(ctx, path)
     if err != nil {
-        glog.V(0).Infof("list %s : %v", path, err)
+        log.Infof("list %s : %v", path, err)
     } else {
         if entry.TtlSec > 0 {
             if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {

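Both redis stores filter expired entries inline while listing; the check, exactly as it appears in the context lines above:

    // an entry is expired once its creation time plus TTL has passed
    expired := entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now())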
16
weed/filer/stream.go

@@ -7,7 +7,7 @@ import (
     "math"
     "strings"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/chrislusf/seaweedfs/weed/wdclient"
@@ -24,7 +24,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
     urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
     if err != nil {
-        glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+        log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
         return err
     }
     fileId2Url[chunkView.FileId] = urlStrings
@@ -36,12 +36,12 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
     data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
     if err != nil {
-        glog.Errorf("read chunk: %v", err)
+        log.Errorf("read chunk: %v", err)
         return fmt.Errorf("read chunk: %v", err)
     }
     _, err = w.Write(data)
     if err != nil {
-        glog.Errorf("write chunk: %v", err)
+        log.Errorf("write chunk: %v", err)
         return fmt.Errorf("write chunk: %v", err)
     }
 }
@@ -65,7 +65,7 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
     for _, chunkView := range chunkViews {
         urlStrings, err := lookupFileIdFn(chunkView.FileId)
         if err != nil {
-            glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+            log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
             return nil, err
         }
@@ -175,7 +175,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
 func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
     urlStrings, err := c.lookupFileId(chunkView.FileId)
     if err != nil {
-        glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+        log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
         return err
     }
     var buffer bytes.Buffer
@@ -188,7 +188,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
         break
     }
     if err != nil {
-        glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+        log.Debugf("read %s failed, err: %v", chunkView.FileId, err)
         buffer.Reset()
     } else {
         break
@@ -201,7 +201,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
     c.bufferPos = 0
     c.bufferOffset = chunkView.LogicOffset
-    // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+    // log.Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
     return nil
 }

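fetchChunkToBuffer tries each replica URL in turn and throws away partial data between attempts, which is why buffer.Reset() sits on the failure path above. A condensed sketch of that loop; readChunk is a hypothetical stand-in for the HTTP read, and urlStrings/chunkView are assumed in scope:

    var buffer bytes.Buffer
    var err error
    for _, urlString := range urlStrings {
        err = readChunk(&buffer, urlString) // hypothetical helper
        if err == nil {
            break // one good replica is enough
        }
        log.Debugf("read %s failed, err: %v", chunkView.FileId, err)
        buffer.Reset() // drop partial data before trying the next replica
    }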
70
weed/filesys/dir.go

@@ -13,7 +13,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/filer"
     "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -48,12 +48,12 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
     if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
         dir.setRootDirAttributes(attr)
-        glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
+        log.Tracef("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
         return nil
     }
     if err := dir.maybeLoadEntry(); err != nil {
-        glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
+        log.Tracef("dir Attr %s,err: %+v", dir.FullPath(), err)
         return err
     }
@@ -64,14 +64,14 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
     attr.Gid = dir.entry.Attributes.Gid
     attr.Uid = dir.entry.Attributes.Uid
-    glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
+    log.Tracef("dir Attr %s, attr: %+v", dir.FullPath(), attr)
     return nil
 }
 func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
-    glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
+    log.Tracef("dir Getxattr %s", dir.FullPath())
     if err := dir.maybeLoadEntry(); err != nil {
         return err
@@ -96,7 +96,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
 func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
     // fsync works at OS level
     // write the file chunks to the filerGrpcAddress
-    glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req)
+    log.Tracef("dir %s fsync %+v", dir.FullPath(), req)
     return nil
 }
@@ -146,7 +146,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
     OExcl: req.Flags&fuse.OpenExclusive != 0,
     Signatures: []int32{dir.wfs.signature},
 }
-    glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
+    log.Debugf("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
     if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@@ -157,7 +157,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
     if strings.Contains(err.Error(), "EEXIST") {
         return fuse.EEXIST
     }
-    glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
+    log.Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
     return fuse.EIO
 }
@@ -182,21 +182,21 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
 func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
     if req.Mode&os.ModeNamedPipe != 0 {
-        glog.V(1).Infof("mknod named pipe %s", req.String())
+        log.Debugf("mknod named pipe %s", req.String())
         return nil, fuse.ENOSYS
     }
     if req.Mode&req.Mode&os.ModeSocket != 0 {
-        glog.V(1).Infof("mknod socket %s", req.String())
+        log.Debugf("mknod socket %s", req.String())
         return nil, fuse.ENOSYS
     }
     // not going to support mknod for normal files either
-    glog.V(1).Infof("mknod %s", req.String())
+    log.Debugf("mknod %s", req.String())
     return nil, fuse.ENOSYS
 }
 func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
-    glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
+    log.Tracef("mkdir %s: %s", dir.FullPath(), req.Name)
     newEntry := &filer_pb.Entry{
         Name: req.Name,
@@ -221,9 +221,9 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
     Signatures: []int32{dir.wfs.signature},
 }
-    glog.V(1).Infof("mkdir: %v", request)
+    log.Debugf("mkdir: %v", request)
     if err := filer_pb.CreateEntry(client, request); err != nil {
-        glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+        log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
         return err
     }
@@ -238,20 +238,20 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
     return node, nil
 }
-    glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+    log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
     return nil, fuse.EIO
 }
 func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
-    glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
+    log.Tracef("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
     fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
     dirPath := util.FullPath(dir.FullPath())
     visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
     if visitErr != nil {
-        glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
+        log.Errorf("dir Lookup %s: %v", dirPath, visitErr)
         return nil, fuse.EIO
     }
     cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
@@ -261,14 +261,14 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
     entry := cachedEntry.ToProtoEntry()
     if entry == nil {
-        // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
+        // log.Tracef("dir Lookup cache miss %s", fullFilePath)
         entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
         if err != nil {
-            glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
+            log.Debugf("dir GetEntry %s: %v", fullFilePath, err)
             return nil, fuse.ENOENT
         }
     } else {
-        glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
+        log.Tracef("dir Lookup cache hit %s", fullFilePath)
     }
     if entry != nil {
@@ -293,13 +293,13 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
     return node, nil
 }
-    glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
+    log.Tracef("not found dir GetEntry %s: %v", fullFilePath, err)
     return nil, fuse.ENOENT
 }
 func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
-    glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
+    log.Tracef("dir ReadDirAll %s", dir.FullPath())
     processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
         fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@@ -316,12 +316,12 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
     dirPath := util.FullPath(dir.FullPath())
     if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
-        glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
+        log.Errorf("dir ReadDirAll %s: %v", dirPath, err)
         return nil, fuse.EIO
     }
     listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
     if listErr != nil {
-        glog.Errorf("list meta cache: %v", listErr)
+        log.Errorf("list meta cache: %v", listErr)
         return nil, fuse.EIO
     }
     for _, cachedEntry := range listedEntries {
@@ -352,11 +352,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
     }
     // first, ensure the filer store can correctly delete
-    glog.V(3).Infof("remove file: %v", req)
+    log.Tracef("remove file: %v", req)
     isDeleteData := entry.HardLinkCounter <= 1
     err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
     if err != nil {
-        glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+        log.Tracef("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
         return fuse.ENOENT
     }
@@ -389,11 +389,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
 func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
-    glog.V(3).Infof("remove directory entry: %v", req)
+    log.Tracef("remove directory entry: %v", req)
     ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
     err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
     if err != nil {
-        glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
+        log.Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
         if strings.Contains(err.Error(), "non-empty") {
             return fuse.EEXIST
         }
@@ -410,7 +410,7 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
 func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
-    glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
+    log.Tracef("%v dir setattr %+v", dir.FullPath(), req)
     if err := dir.maybeLoadEntry(); err != nil {
         return err
@@ -438,7 +438,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
 func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
-    glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
+    log.Tracef("dir Setxattr %s: %s", dir.FullPath(), req.Name)
     if err := dir.maybeLoadEntry(); err != nil {
         return err
@@ -454,7 +454,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
 func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
-    glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
+    log.Tracef("dir Removexattr %s: %s", dir.FullPath(), req.Name)
     if err := dir.maybeLoadEntry(); err != nil {
         return err
@@ -470,7 +470,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e
 func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
-    glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
+    log.Tracef("dir Listxattr %s", dir.FullPath())
     if err := dir.maybeLoadEntry(); err != nil {
         return err
@@ -485,7 +485,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
 }
 func (dir *Dir) Forget() {
-    glog.V(4).Infof("Forget dir %s", dir.FullPath())
+    log.Tracef("Forget dir %s", dir.FullPath())
     dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
 }
@@ -517,10 +517,10 @@ func (dir *Dir) saveEntry() error {
     Signatures: []int32{dir.wfs.signature},
 }
-    glog.V(1).Infof("save dir entry: %v", request)
+    log.Debugf("save dir entry: %v", request)
     _, err := client.UpdateEntry(context.Background(), request)
     if err != nil {
-        glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+        log.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
         return fuse.EIO
     }

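A note on the error paths above: filer errors arrive over gRPC as flattened strings, so Create and removeFolder pick POSIX errnos by substring match. Collected into one hypothetical helper for clarity (not a function in the commit; the cases are exactly the ones visible above):

    func toFuseError(err error) error { // hypothetical helper
        switch {
        case err == nil:
            return nil
        case strings.Contains(err.Error(), "EEXIST"): // create of an existing file
            return fuse.EEXIST
        case strings.Contains(err.Error(), "non-empty"): // rmdir of a non-empty dir
            return fuse.EEXIST
        default:
            return fuse.EIO
        }
    }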
16
weed/filesys/dir_link.go

@@ -8,7 +8,7 @@ import (
     "time"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/seaweedfs/fuse"
     "github.com/seaweedfs/fuse/fs"
@@ -26,10 +26,10 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
     oldFile, ok := old.(*File)
     if !ok {
-        glog.Errorf("old node is not a file: %+v", old)
+        log.Errorf("old node is not a file: %+v", old)
     }
-    glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
+    log.Tracef("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
     if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
         return nil, err
@@ -69,13 +69,13 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
     defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
     if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
-        glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+        log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
         return fuse.EIO
     }
     dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
     if err := filer_pb.CreateEntry(client, request); err != nil {
-        glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
+        log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
         return fuse.EIO
     }
     dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
@@ -96,7 +96,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
-    glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
+    log.Tracef("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
     request := &filer_pb.CreateEntryRequest{
         Directory: dir.FullPath(),
@@ -121,7 +121,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
     defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
     if err := filer_pb.CreateEntry(client, request); err != nil {
-        glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
+        log.Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
         return fuse.EIO
     }
@@ -147,7 +147,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
         return "", fuse.Errno(syscall.EINVAL)
     }
-    glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
+    log.Tracef("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
     return entry.Attributes.SymlinkTarget, nil

14
weed/filesys/dir_rename.go

@@ -6,7 +6,7 @@ import (
     "github.com/seaweedfs/fuse"
     "github.com/seaweedfs/fuse/fs"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -18,12 +18,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
     newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
     oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
-    glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
+    log.Tracef("dir Rename %s => %s", oldPath, newPath)
     // find local old entry
     oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
     if err != nil {
-        glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
+        log.Errorf("dir Rename can not find source %s : %v", oldPath, err)
         return fuse.ENOENT
     }
@@ -41,7 +41,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
     _, err := client.AtomicRenameEntry(ctx, request)
     if err != nil {
-        glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
+        log.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
         return fuse.EIO
     }
@@ -49,18 +49,18 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
     })
     if err != nil {
-        glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
+        log.Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
         return fuse.EIO
     }
     // TODO: replicate renaming logic on filer
     if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
-        glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
+        log.Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
         return fuse.EIO
     }
     oldEntry.FullPath = newPath
     if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
-        glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
+        log.Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
         return fuse.EIO
     }

8
weed/filesys/dirty_page.go

@@ -7,7 +7,7 @@ import (
     "sync"
     "time"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -41,7 +41,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
 func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
-    glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
+    log.Tracef("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
     if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
         // this is more than what buffer can hold.
@@ -111,7 +111,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
     reader = io.LimitReader(reader, size)
     chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
     if err != nil {
-        glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
+        log.Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
         pages.chunkSaveErrChan <- err
         return
     }
@@ -120,7 +120,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
     pages.chunkAddLock.Lock()
     defer pages.chunkAddLock.Unlock()
     pages.f.addChunks([]*filer_pb.FileChunk{chunk})
-    glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
+    log.Tracef("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
 }
 if pages.f.wfs.concurrentWriters != nil {

8
weed/filesys/dirty_page_interval.go

@@ -30,12 +30,12 @@ func (list *IntervalLinkedList) Size() int64 {
     return list.Tail.Offset + list.Tail.Size - list.Head.Offset
 }
 func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
-    // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
+    // log.Tracef("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
     list.Tail.Next = node
     list.Tail = node
 }
 func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
-    // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
+    // log.Tracef("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
     node.Next = list.Head
     list.Head = node
 }
@@ -46,7 +46,7 @@ func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
     nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
     if nodeStart < nodeStop {
-        // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
+        // log.Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
         copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
     }
@@ -144,7 +144,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
     }
     if prevList != nil && nextList != nil {
-        // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
+        // log.Tracef("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
         prevList.Tail.Next = nextList.Head
         prevList.Tail = nextList.Tail
         c.removeList(nextList)

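The "connecting" trace above fires when a new write bridges the gap between two existing dirty ranges, splicing them into one list. A toy sequence illustrating when that happens (method signatures as they appear in this file; the write payloads are made up):

    c := &ContinuousIntervals{}
    c.AddInterval([]byte("aaaaa"), 0)  // list A covers [0,5)
    c.AddInterval([]byte("ccccc"), 10) // list B covers [10,15)
    c.AddInterval([]byte("bbbbb"), 5)  // bridges the gap: A, the new node,
                                       // and B splice into one list over [0,15)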
40
weed/filesys/file.go

@@ -11,7 +11,7 @@ import (
     "github.com/seaweedfs/fuse/fs"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -45,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
 func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
-    glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
+    log.Tracef("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
     entry := file.entry
     if file.isOpen <= 0 || entry == nil {
@@ -60,7 +60,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
     attr.Size = filer.FileSize(entry)
     if file.isOpen > 0 {
         attr.Size = entry.Attributes.FileSize
-        glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
+        log.Tracef("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
     }
     attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
     attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
@@ -78,7 +78,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
 func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
-    glog.V(4).Infof("file Getxattr %s", file.fullpath())
+    log.Tracef("file Getxattr %s", file.fullpath())
     entry, err := file.maybeLoadEntry(ctx)
     if err != nil {
@@ -90,13 +90,13 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
 func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
-    glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
+    log.Tracef("file %v open %+v", file.fullpath(), req)
     handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
     resp.Handle = fuse.HandleID(handle.handle)
-    glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
+    log.Tracef("%v file open handle id = %d", file.fullpath(), handle.handle)
     return handle, nil
@@ -104,7 +104,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
-    glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
+    log.Tracef("%v file setattr %+v", file.fullpath(), req)
     _, err := file.maybeLoadEntry(ctx)
     if err != nil {
@@ -123,7 +123,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
     if req.Valid.Size() {
-        glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
+        log.Tracef("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
         if req.Size < filer.FileSize(file.entry) {
             // fmt.Printf("truncate %v \n", fullPath)
             var chunks []*filer_pb.FileChunk
@@ -135,10 +135,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
             int64Size = int64(req.Size) - chunk.Offset
             if int64Size > 0 {
                 chunks = append(chunks, chunk)
-                glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
+                log.Tracef("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
                 chunk.Size = uint64(int64Size)
             } else {
-                glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
+                log.Tracef("truncated whole chunk %+v\n", chunk.GetFileIdString())
                 truncatedChunks = append(truncatedChunks, chunk)
             }
         }
@@ -195,7 +195,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
-    glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
+    log.Tracef("file Setxattr %s: %s", file.fullpath(), req.Name)
     entry, err := file.maybeLoadEntry(ctx)
     if err != nil {
@@ -212,7 +212,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
 func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
-    glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
+    log.Tracef("file Removexattr %s: %s", file.fullpath(), req.Name)
     entry, err := file.maybeLoadEntry(ctx)
     if err != nil {
@@ -229,7 +229,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
 func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
-    glog.V(4).Infof("file Listxattr %s", file.fullpath())
+    log.Tracef("file Listxattr %s", file.fullpath())
     entry, err := file.maybeLoadEntry(ctx)
     if err != nil {
@@ -247,14 +247,14 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
 func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
     // fsync works at OS level
     // write the file chunks to the filerGrpcAddress
-    glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
+    log.Tracef("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
     return nil
 }
 func (file *File) Forget() {
     t := util.NewFullPath(file.dir.FullPath(), file.Name)
-    glog.V(4).Infof("Forget file %s", t)
+    log.Tracef("Forget file %s", t)
     file.wfs.fsNodeCache.DeleteFsNode(t)
 }
@@ -271,13 +271,13 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
     }
     entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
     if err != nil {
-        glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+        log.Tracef("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
         return entry, err
     }
     if entry != nil {
         file.setEntry(entry)
     } else {
-        glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
+        log.Warnf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
     }
     return entry, nil
 }
@@ -319,7 +319,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
     file.reader = nil
-    glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
+    log.Tracef("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
     file.entry.Chunks = append(file.entry.Chunks, newChunks...)
 }
@@ -348,10 +348,10 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
     Signatures: []int32{file.wfs.signature},
 }
-    glog.V(4).Infof("save file entry: %v", request)
+    log.Tracef("save file entry: %v", request)
     _, err := client.UpdateEntry(context.Background(), request)
     if err != nil {
-        glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
+        log.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
         return fuse.EIO
     }

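The substitutions above repeat mechanically through the rest of the commit. Read side by side, the call-site rewrite maps glog verbosity levels onto leveled calls roughly like this (a sketch inferred from the hunks in this diff; the elided arguments are placeholders):

    glog.V(4).Infof(...)  ->  log.Tracef(...)
    glog.V(3).Infof(...)  ->  log.Tracef(...)
    glog.V(1).Infof(...)  ->  log.Debugf(...)
    glog.V(0).Infof(...)  ->  log.Infof(...)
    glog.Warningf(...)    ->  log.Warnf(...)
    glog.Errorf(...)      ->  log.Errorf(...)
    glog.Fatalf(...)      ->  log.Fatalf(...)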
34
weed/filesys/filehandle.go

@@ -14,7 +14,7 @@ import (
 "github.com/seaweedfs/fuse/fs"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -57,7 +57,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
 func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
-glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
+log.Tracef("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
 fh.RLock()
 defer fh.RUnlock()
@@ -82,12 +82,12 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 }
 if err != nil {
-glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
+log.Warnf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
 return fuse.EIO
 }
 if totalRead > int64(len(buff)) {
-glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
+log.Warnf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
 totalRead = min(int64(len(buff)), totalRead)
 }
 // resp.Data = buff[:totalRead]
@@ -106,7 +106,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 fileSize := int64(filer.FileSize(fh.f.entry))
 if fileSize == 0 {
-glog.V(1).Infof("empty fh %v", fh.f.fullpath())
+log.Debugf("empty fh %v", fh.f.fullpath())
 return 0, io.EOF
 }
@@ -127,10 +127,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 totalRead, err := fh.f.reader.ReadAt(buff, offset)
 if err != nil && err != io.EOF {
-glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
+log.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
 }
-glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
+log.Tracef("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
 return int64(totalRead), err
 }
@@ -150,7 +150,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 }
 fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
-glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
+log.Tracef("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
 fh.dirtyPages.AddPage(req.Offset, data)
@@ -169,7 +169,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
-glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
+log.Tracef("Release %v fh %d", fh.f.fullpath(), fh.handle)
 fh.Lock()
 defer fh.Unlock()
@@ -177,7 +177,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
 fh.f.isOpen--
 if fh.f.isOpen < 0 {
-glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
+log.Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
 fh.f.isOpen = 0
 return nil
 }
@@ -185,7 +185,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
 if fh.f.isOpen == 0 {
 if err := fh.doFlush(ctx, req.Header); err != nil {
-glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
+log.Errorf("Release doFlush %s: %v", fh.f.Name, err)
 }
 // stop the goroutine
@@ -211,7 +211,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 // flush works at fh level
 // send the data to the OS
-glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
+log.Tracef("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
 fh.dirtyPages.saveExistingPagesToStorage()
@@ -250,9 +250,9 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 Signatures: []int32{fh.f.wfs.signature},
 }
-glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
+log.Tracef("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
 for i, chunk := range fh.f.entry.Chunks {
-glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+log.Tracef("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
 }
 manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
@@ -261,7 +261,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
 if manifestErr != nil {
 // not good, but should be ok
-glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
+log.Infof("MaybeManifestize: %v", manifestErr)
 }
 fh.f.entry.Chunks = append(chunks, manifestChunks...)
@@ -269,7 +269,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
 if err := filer_pb.CreateEntry(client, request); err != nil {
-glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
+log.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
 return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
 }
@@ -283,7 +283,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 }
 if err != nil {
-glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
+log.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
 return fuse.EIO
 }

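The new weed/util/log package itself does not appear in these hunks, so the following is only a minimal sketch of what a logrus-backed wrapper exposing these helpers could look like; the actual package may configure logrus differently.

    package log

    import "github.com/sirupsen/logrus"

    func init() {
        logrus.SetLevel(logrus.TraceLevel) // let Tracef/Debugf calls through
        logrus.SetReportCaller(true)       // logrus can include file and line per entry
    }

    func Tracef(format string, args ...interface{}) { logrus.Tracef(format, args...) }
    func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...) }
    func Infof(format string, args ...interface{})  { logrus.Infof(format, args...) }
    func Warnf(format string, args ...interface{})  { logrus.Warnf(format, args...) }
    func Errorf(format string, args ...interface{}) { logrus.Errorf(format, args...) }
    func Fatalf(format string, args ...interface{}) { logrus.Fatalf(format, args...) }

The hunks below also call log.Info, log.Infoln, log.Trace, and log.Error, which would wrap the matching logrus functions the same way.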
8
weed/filesys/meta_cache/meta_cache.go

@@ -8,7 +8,7 @@ import (
 "github.com/chrislusf/seaweedfs/weed/filer"
 "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
 )
@@ -44,7 +44,7 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
 }
 if err := store.Initialize(config, ""); err != nil {
-glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
+log.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
 }
 return filer.NewFilerStoreWrapper(store)
@@ -72,7 +72,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
 // skip the unnecessary deletion
 // leave the update to the following InsertEntry operation
 } else {
-glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name())
+log.Tracef("DeleteEntry %s/%s", oldPath, oldPath.Name())
 if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
 return err
 }
@@ -85,7 +85,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
 if newEntry != nil {
 newDir, _ := newEntry.DirAndName()
 if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
-glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name())
+log.Tracef("InsertEntry %s/%s", newDir, newEntry.Name())
 if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
 return err
 }

6
weed/filesys/meta_cache/meta_cache_init.go

@@ -5,7 +5,7 @@ import (
 "fmt"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -14,13 +14,13 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
 return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
-glog.V(4).Infof("ReadDirAllEntries %s ...", path)
+log.Tracef("ReadDirAllEntries %s ...", path)
 util.Retry("ReadDirAllEntries", func() error {
 err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
 entry := filer.FromPbEntry(string(dirPath), pbEntry)
 if err := mc.doInsertEntry(context.Background(), entry); err != nil {
-glog.V(0).Infof("read %s: %v", entry.FullPath, err)
+log.Infof("read %s: %v", entry.FullPath, err)
 return err
 }
 if entry.IsDirectory() {

10
weed/filesys/meta_cache/meta_cache_subscribe.go

@@ -7,7 +7,7 @@ import (
 "time"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -28,7 +28,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 var newEntry *filer.Entry
 if message.OldEntry != nil {
 oldPath = util.NewFullPath(dir, message.OldEntry.Name)
-glog.V(4).Infof("deleting %v", oldPath)
+log.Tracef("deleting %v", oldPath)
 }
 if message.NewEntry != nil {
@@ -36,7 +36,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 dir = message.NewParentPath
 }
 key := util.NewFullPath(dir, message.NewEntry.Name)
-glog.V(4).Infof("creating %v", key)
+log.Tracef("creating %v", key)
 newEntry = filer.FromPbEntry(dir, message.NewEntry)
 }
 err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
@@ -73,13 +73,13 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
 }
 if err := processEventFn(resp); err != nil {
-glog.Fatalf("process %v: %v", resp, err)
+log.Fatalf("process %v: %v", resp, err)
 }
 lastTsNs = resp.TsNs
 }
 })
 if err != nil {
-glog.Errorf("subscribing filer meta change: %v", err)
+log.Errorf("subscribing filer meta change: %v", err)
 }
 time.Sleep(time.Second)
 }

16
weed/filesys/wfs.go

@@ -17,7 +17,7 @@ import (
 "github.com/seaweedfs/fuse/fs"
 "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@@ -128,7 +128,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
 func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
 fullpath := file.fullpath()
-glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
+log.Tracef("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
 wfs.handlesLock.Lock()
 defer wfs.handlesLock.Unlock()
@@ -156,7 +156,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
 wfs.handlesLock.Lock()
 defer wfs.handlesLock.Unlock()
-glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+log.Tracef("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
 delete(wfs.handles, fullpath.AsInode())
@@ -166,7 +166,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
 // Statfs is called to obtain file system metadata. Implements fuse.FSStatfser
 func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
-glog.V(4).Infof("reading fs stats: %+v", req)
+log.Tracef("reading fs stats: %+v", req)
 if wfs.stats.lastChecked < time.Now().Unix()-20 {
@@ -178,13 +178,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
 Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
 }
-glog.V(4).Infof("reading filer stats: %+v", request)
+log.Tracef("reading filer stats: %+v", request)
 resp, err := client.Statistics(context.Background(), request)
 if err != nil {
-glog.V(0).Infof("reading filer stats %v: %v", request, err)
+log.Infof("reading filer stats %v: %v", request, err)
 return err
 }
-glog.V(4).Infof("read filer stats: %+v", resp)
+log.Tracef("read filer stats: %+v", resp)
 wfs.stats.TotalSize = resp.TotalSize
 wfs.stats.UsedSize = resp.UsedSize
@@ -194,7 +194,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
 return nil
 })
 if err != nil {
-glog.V(0).Infof("filer Statistics: %v", err)
+log.Infof("filer Statistics: %v", err)
 return err
 }
 }

6
weed/filesys/wfs_deletion.go

@@ -6,7 +6,7 @@ import (
 "google.golang.org/grpc"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/operation"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
@@ -24,7 +24,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
 }
 dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
 if manifestResolveErr != nil {
-glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 }
 for _, dChunk := range dataChunks {
 fileIds = append(fileIds, dChunk.GetFileIdString())
@@ -49,7 +49,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
 m := make(map[string]operation.LookupResult)
-glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
+log.Tracef("deleteFileIds lookup volume id locations: %v", vids)
 resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
 VolumeIds: vids,
 })

8
weed/filesys/wfs_write.go

@@ -6,7 +6,7 @@ import (
 "io"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/operation"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/security"
@@ -32,7 +32,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
 resp, err := client.AssignVolume(context.Background(), request)
 if err != nil {
-glog.V(0).Infof("assign volume failure %v: %v", request, err)
+log.Infof("assign volume failure %v: %v", request, err)
 return err
 }
 if resp.Error != "" {
@@ -55,11 +55,11 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
 fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
 uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
 if err != nil {
-glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+log.Infof("upload data %v to %s: %v", filename, fileUrl, err)
 return nil, "", "", fmt.Errorf("upload data: %v", err)
 }
 if uploadResult.Error != "" {
-glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
 return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
 }

2
weed/filesys/xattr.go

@@ -111,7 +111,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
 func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
 fullpath := util.NewFullPath(dir, name)
-// glog.V(3).Infof("read entry cache miss %s", fullpath)
+// log.Tracef("read entry cache miss %s", fullpath)
 // read from async meta cache
 meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))

2
weed/glog/README

@@ -24,7 +24,7 @@ The comment from glog.go introduces the ideas:
 glog.Info("Prepare to repel boarders")
-glog.Fatalf("Initialization failed: %s", err)
+log.Fatalf("Initialization failed: %s", err)
 See the documentation for the V function for an explanation
 of these examples:

2
weed/glog/glog.go

@@ -22,7 +22,7 @@
 //
 // glog.Info("Prepare to repel boarders")
 //
-// glog.Fatalf("Initialization failed: %s", err)
+// log.Fatalf("Initialization failed: %s", err)
 //
 // See the documentation for the V function for an explanation of these examples:
 //

4
weed/images/resizing.go

@@ -10,7 +10,7 @@ import (
 "github.com/disintegration/imaging"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 )
 func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@@ -50,7 +50,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
 }
 return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
 } else {
-glog.Error(err)
+log.Error(err)
 }
 return read, 0, 0
 }

8
weed/messaging/broker/broker_append.go

@@ -5,7 +5,7 @@ import (
 "fmt"
 "io"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/operation"
 "github.com/chrislusf/seaweedfs/weed/pb"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -34,7 +34,7 @@ func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messag
 _, err := client.AppendToEntry(context.Background(), request)
 if err != nil {
-glog.V(0).Infof("append to file %v: %v", request, err)
+log.Infof("append to file %v: %v", request, err)
 return err
 }
@@ -61,7 +61,7 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
 resp, err := client.AssignVolume(context.Background(), request)
 if err != nil {
-glog.V(0).Infof("assign volume failure %v: %v", request, err)
+log.Infof("assign volume failure %v: %v", request, err)
 return err
 }
 if resp.Error != "" {
@@ -98,7 +98,7 @@ func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient
 if err == io.EOF {
 return
 }
-glog.V(0).Infof("fail to connect to %s: %v", filer, err)
+log.Infof("fail to connect to %s: %v", filer, err)
 } else {
 break
 }

10
weed/messaging/broker/broker_grpc_server_discovery.go

@@ -5,7 +5,7 @@ import (
 "fmt"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@@ -78,11 +78,11 @@ func (broker *MessageBroker) checkFilers() {
 found = true
 break
 }
-glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
+log.Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
 time.Sleep(time.Second)
 }
 }
-glog.V(0).Infof("received master list: %s", masters)
+log.Infof("received master list: %s", masters)
 // contact each masters for filers
 var filers []string
@@ -105,11 +105,11 @@ func (broker *MessageBroker) checkFilers() {
 found = true
 break
 }
-glog.V(0).Infof("failed to list filers: %v", err)
+log.Infof("failed to list filers: %v", err)
 time.Sleep(time.Second)
 }
 }
-glog.V(0).Infof("received filer list: %s", filers)
+log.Infof("received filer list: %s", filers)
 broker.option.Filers = filers

10
weed/messaging/broker/broker_grpc_server_publish.go

@@ -8,7 +8,7 @@ import (
 "github.com/golang/protobuf/proto"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
 )
@@ -65,7 +65,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 for {
 // println("recv")
 in, err := stream.Recv()
-// glog.V(0).Infof("recieved %v err: %v", in, err)
+// log.Infof("recieved %v err: %v", in, err)
 if err == io.EOF {
 return nil
 }
@@ -81,7 +81,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 data, err := proto.Marshal(in.Data)
 if err != nil {
-glog.Errorf("marshall error: %v\n", err)
+log.Errorf("marshall error: %v\n", err)
 continue
 }
@@ -97,7 +97,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 }
 if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
-glog.V(0).Infof("err writing %s: %v", md5File, err)
+log.Infof("err writing %s: %v", md5File, err)
 }
 // fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
@@ -105,7 +105,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 // send the close ack
 // println("server send ack closing")
 if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
-glog.V(0).Infof("err sending close response: %v", err)
+log.Infof("err sending close response: %v", err)
 }
 return nil

10
weed/messaging/broker/broker_grpc_server_subscribe.go

@@ -10,7 +10,7 @@ import (
 "github.com/golang/protobuf/proto"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
 )
@@ -76,7 +76,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
 Data: m,
 })
 if err != nil {
-glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
+log.Infof("=> subscriber %v: %+v", subscriberId, err)
 }
 return err
 }
@@ -84,12 +84,12 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
 eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
 m := &messaging_pb.Message{}
 if err = proto.Unmarshal(logEntry.Data, m); err != nil {
-glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
+log.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
 return err
 }
 // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
 if err = eachMessageFn(m); err != nil {
-glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
+log.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
 return err
 }
 if m.IsClose {
@@ -122,7 +122,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
 return isConnected
 }, eachLogEntryFn)
 if err != nil {
-glog.Errorf("processed to %v: %v", lastReadTime, err)
+log.Errorf("processed to %v: %v", lastReadTime, err)
 time.Sleep(3127 * time.Millisecond)
 if err != log_buffer.ResumeError {
 break

12
weed/messaging/broker/broker_server.go

@@ -6,7 +6,7 @@ import (
 "google.golang.org/grpc"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -52,7 +52,7 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
 defer cancel()
 stream, err := client.KeepConnected(ctx)
 if err != nil {
-glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+log.Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
 return err
 }
@@ -67,24 +67,24 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
 Name: broker.option.Ip,
 GrpcPort: uint32(broker.option.Port),
 }); err != nil {
-glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+log.Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
 return err
 }
 // TODO send events of adding/removing topics
-glog.V(0).Infof("conntected with filer: %v", filer)
+log.Infof("conntected with filer: %v", filer)
 for {
 if err := stream.Send(&filer_pb.KeepConnectedRequest{
 Name: broker.option.Ip,
 GrpcPort: uint32(broker.option.Port),
 }); err != nil {
-glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+log.Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
 return err
 }
 // println("send heartbeat")
 if _, err := stream.Recv(); err != nil {
-glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
+log.Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
 return err
 }
 // println("received reply")

4
weed/messaging/broker/topic_manager.go

@@ -6,7 +6,7 @@ import (
 "time"
 "github.com/chrislusf/seaweedfs/weed/filer"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
 "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
 )
@@ -65,7 +65,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
 )
 if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
-glog.V(0).Infof("log write failed %s: %v", targetFile, err)
+log.Infof("log write failed %s: %v", targetFile, err)
 }
 }
 logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {

6
weed/notification/aws_sqs/aws_sqs_pub.go

@@ -8,7 +8,7 @@ import (
 "github.com/aws/aws-sdk-go/aws/credentials"
 "github.com/aws/aws-sdk-go/aws/session"
 "github.com/aws/aws-sdk-go/service/sqs"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *AwsSqsPub) GetName() string {
 }
 func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) {
-glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
-glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
+log.Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+log.Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
 return k.initialize(
 configuration.GetString(prefix+"aws_access_key_id"),
 configuration.GetString(prefix+"aws_secret_access_key"),

8
weed/notification/configuration.go

@@ -1,7 +1,7 @@
 package notification
 import (
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/proto"
 "github.com/spf13/viper"
@@ -32,11 +32,11 @@ func LoadConfiguration(config *viper.Viper, prefix string) {
 for _, queue := range MessageQueues {
 if config.GetBool(prefix + queue.GetName() + ".enabled") {
 if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil {
-glog.Fatalf("Failed to initialize notification for %s: %+v",
+log.Fatalf("Failed to initialize notification for %s: %+v",
 queue.GetName(), err)
 }
 Queue = queue
-glog.V(0).Infof("Configure notification message queue for %s", queue.GetName())
+log.Infof("Configure notification message queue for %s", queue.GetName())
 return
 }
 }
@@ -50,7 +50,7 @@ func validateOneEnabledQueue(config *viper.Viper) {
 if enabledQueue == "" {
 enabledQueue = queue.GetName()
 } else {
-glog.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
+log.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
 }
 }
 }

6
weed/notification/gocdk_pub_sub/gocdk_pub_sub.go

@@ -22,7 +22,7 @@ import (
 "gocloud.dev/pubsub"
 _ "gocloud.dev/pubsub/awssnssqs"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/util"
 // _ "gocloud.dev/pubsub/azuresb"
@@ -46,10 +46,10 @@ func (k *GoCDKPubSub) GetName() string {
 func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
 k.topicURL = configuration.GetString(prefix + "topic_url")
-glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
+log.Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
 topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
 if err != nil {
-glog.Fatalf("Failed to open topic: %v", err)
+log.Fatalf("Failed to open topic: %v", err)
 }
 k.topic = topic
 return nil

14
weed/notification/google_pub_sub/google_pub_sub.go

@@ -6,7 +6,7 @@ import (
 "os"
 "cloud.google.com/go/pubsub"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/proto"
@@ -26,8 +26,8 @@ func (k *GooglePubSub) GetName() string {
 }
 func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) {
-glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
-glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
+log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
 return k.initialize(
 configuration.GetString(prefix+"google_application_credentials"),
 configuration.GetString(prefix+"project_id"),
@@ -43,13 +43,13 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
 var found bool
 google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
 if !found {
-glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
+log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
 }
 }
 client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
 if err != nil {
-glog.Fatalf("Failed to create client: %v", err)
+log.Fatalf("Failed to create client: %v", err)
 }
 k.topic = client.Topic(topicName)
@@ -57,11 +57,11 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
 if !exists {
 k.topic, err = client.CreateTopic(ctx, topicName)
 if err != nil {
-glog.Fatalf("Failed to create topic %s: %v", topicName, err)
+log.Fatalf("Failed to create topic %s: %v", topicName, err)
 }
 }
 } else {
-glog.Fatalf("Failed to check topic %s: %v", topicName, err)
+log.Fatalf("Failed to check topic %s: %v", topicName, err)
 }
 return nil

10
weed/notification/kafka/kafka_queue.go

@@ -2,7 +2,7 @@ package kafka
 import (
 "github.com/Shopify/sarama"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/proto"
@@ -22,8 +22,8 @@ func (k *KafkaQueue) GetName() string {
 }
 func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
-glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
-glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
+log.Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
+log.Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
 return k.initialize(
 configuration.GetStringSlice(prefix+"hosts"),
 configuration.GetString(prefix+"topic"),
@@ -67,7 +67,7 @@ func (k *KafkaQueue) handleSuccess() {
 for {
 pm := <-k.producer.Successes()
 if pm != nil {
-glog.V(3).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
+log.Tracef("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
 }
 }
 }
@@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() {
 for {
 err := <-k.producer.Errors()
 if err != nil {
-glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
+log.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
 }
 }
 }

4
weed/notification/log/log_queue.go

@@ -1,7 +1,7 @@
 package kafka
 import (
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/notification"
 "github.com/chrislusf/seaweedfs/weed/util"
 "github.com/golang/protobuf/proto"
@@ -24,6 +24,6 @@ func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (
 func (k *LogQueue) SendMessage(key string, message proto.Message) (err error) {
-glog.V(0).Infof("%v: %+v", key, message)
+log.Infof("%v: %+v", key, message)
 return nil
 }

8
weed/operation/chunked_file.go

@@ -12,7 +12,7 @@ import (
 "google.golang.org/grpc"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -57,7 +57,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error)
 if isCompressed {
 var err error
 if buffer, err = util.DecompressData(buffer); err != nil {
-glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
+log.Infof("fail to decompress chunk manifest: %v", err)
 }
 }
 cm := ChunkManifest{}
@@ -79,12 +79,12 @@ func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDial
 }
 results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
 if err != nil {
-glog.V(0).Infof("delete %+v: %v", fileIds, err)
+log.Infof("delete %+v: %v", fileIds, err)
 return fmt.Errorf("chunk delete: %v", err)
 }
 for _, result := range results {
 if result.Error != "" {
-glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error)
+log.Infof("delete file %+v: %v", result.FileId, result.Error)
 return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error)
 }
 }

4
weed/operation/grpc_client.go

@@ -7,7 +7,7 @@ import (
 "google.golang.org/grpc"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@@ -32,7 +32,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
 sepIndex := strings.LastIndex(volumeServer, ":")
 port, err := strconv.Atoi(volumeServer[sepIndex+1:])
 if err != nil {
-glog.Errorf("failed to parse volume server address: %v", volumeServer)
+log.Errorf("failed to parse volume server address: %v", volumeServer)
 return "", err
 }
 return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil

6
weed/operation/lookup_vid_cache.go

@@ -6,7 +6,7 @@ import (
 "sync"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 )
 var ErrorNotFound = errors.New("not found")
@@ -23,7 +23,7 @@ type VidCache struct {
 func (vc *VidCache) Get(vid string) ([]Location, error) {
 id, err := strconv.Atoi(vid)
 if err != nil {
-glog.V(1).Infof("Unknown volume id %s", vid)
+log.Debugf("Unknown volume id %s", vid)
 return nil, err
 }
 vc.RLock()
@@ -42,7 +42,7 @@ func (vc *VidCache) Get(vid string) ([]Location, error) {
 func (vc *VidCache) Set(vid string, locations []Location, duration time.Duration) {
 id, err := strconv.Atoi(vid)
 if err != nil {
-glog.V(1).Infof("Unknown volume id %s", vid)
+log.Debugf("Unknown volume id %s", vid)
 return
 }
 vc.Lock()

10
weed/operation/submit.go

@@ -11,7 +11,7 @@ import (
 "google.golang.org/grpc"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/security"
 )
@@ -91,14 +91,14 @@ func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
 func newFilePart(fullPathFilename string) (ret FilePart, err error) {
 fh, openErr := os.Open(fullPathFilename)
 if openErr != nil {
-glog.V(0).Info("Failed to open file: ", fullPathFilename)
+log.Info("Failed to open file: ", fullPathFilename)
 return ret, openErr
 }
 ret.Reader = fh
 fi, fiErr := fh.Stat()
 if fiErr != nil {
-glog.V(0).Info("Failed to stat file:", fullPathFilename)
+log.Info("Failed to stat file:", fullPathFilename)
 return ret, fiErr
 }
 ret.ModTime = fi.ModTime().UTC().Unix()
@@ -210,7 +210,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
 func upload_one_chunk(filename string, reader io.Reader, master,
 fileUrl string, jwt security.EncodedJwt,
 ) (size uint32, e error) {
-glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
+log.Trace("Uploading part ", filename, " to ", fileUrl, "...")
 uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
 if uploadError != nil {
 return 0, uploadError
@@ -223,7 +223,7 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
 if e != nil {
 return e
 }
-glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
+log.Trace("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
 u, _ := url.Parse(fileUrl)
 q := u.Query()
 q.Set("cm", "true")

16
weed/operation/upload_content.go

@@ -15,7 +15,7 @@ import (
 "strings"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 "github.com/chrislusf/seaweedfs/weed/security"
 "github.com/chrislusf/seaweedfs/weed/util"
@@ -97,7 +97,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by
 if err == nil {
 return
 } else {
-glog.Warningf("uploading to %s: %v", uploadUrl, err)
+log.Warnf("uploading to %s: %v", uploadUrl, err)
 }
 }
 return
@@ -203,22 +203,22 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
 file_writer, cp_err := body_writer.CreatePart(h)
 if cp_err != nil {
-glog.V(0).Infoln("error creating form file", cp_err.Error())
+log.Infoln("error creating form file", cp_err.Error())
 return nil, cp_err
 }
 if err := fillBufferFunction(file_writer); err != nil {
-glog.V(0).Infoln("error copying data", err)
+log.Infoln("error copying data", err)
 return nil, err
 }
 content_type := body_writer.FormDataContentType()
 if err := body_writer.Close(); err != nil {
-glog.V(0).Infoln("error closing body", err)
+log.Infoln("error closing body", err)
 return nil, err
 }
 req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes()))
 if postErr != nil {
-glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr)
+log.Debugf("create upload request %s: %v", uploadUrl, postErr)
 return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr)
 }
 req.Header.Set("Content-Type", content_type)
@@ -231,7 +231,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
 // print("+")
 resp, post_err := HttpClient.Do(req)
 if post_err != nil {
-glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
+log.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
 debug.PrintStack()
 return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
 }
@@ -252,7 +252,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
 unmarshal_err := json.Unmarshal(resp_body, &ret)
 if unmarshal_err != nil {
-glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
+log.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
 return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err)
 }
 if ret.Error != "" {

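A pattern visible throughout these hunks: a failure is logged where it happens and also returned as a wrapped error, so the migration only rewrites the logging half of each pair. A minimal sketch of the pattern (doUpload and its arguments are hypothetical stand-ins, not names from this diff):

    if err := doUpload(uploadUrl); err != nil { // doUpload: hypothetical helper
        log.Errorf("upload to %s: %v", uploadUrl, err)        // operator-facing record
        return fmt.Errorf("upload to %s: %v", uploadUrl, err) // caller still sees the failure
    }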
22
weed/pb/filer_pb/filer_client.go

@@ -10,7 +10,7 @@ import (
 "strings"
 "time"
-"github.com/chrislusf/seaweedfs/weed/glog"
+"github.com/chrislusf/seaweedfs/weed/util/log"
 "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -35,18 +35,18 @@ func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry
 Name: name,
 }
-// glog.V(3).Infof("read %s request: %v", fullFilePath, request)
+// log.Tracef("read %s request: %v", fullFilePath, request)
 resp, err := LookupEntry(client, request)
 if err != nil {
 if err == ErrNotFound {
 return nil
 }
-glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+log.Tracef("read %s %v: %v", fullFilePath, resp, err)
 return err
 }
 if resp.Entry == nil {
-// glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
+// log.Tracef("read %s entry: %v", fullFilePath, entry)
 return nil
 }
@@ -83,7 +83,7 @@ func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, f
 InclusiveStartFrom: inclusive,
 }
-glog.V(4).Infof("read directory: %v", request)
+log.Tracef("read directory: %v", request)
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 stream, err := client.ListEntries(ctx, request)
@@ -130,14 +130,14 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin
 Name: entryName,
 }
-glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+log.Tracef("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
 resp, err := LookupEntry(client, request)
 if err != nil {
 if err == ErrNotFound {
 exists = false
 return nil
 }
-glog.V(0).Infof("exists entry %v: %v", request, err)
+log.Infof("exists entry %v: %v", request, err)
 return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
 }
@@ -173,9 +173,9 @@ func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string,
 Entry: entry,
 }
-glog.V(1).Infof("mkdir: %v", request)
+log.Debugf("mkdir: %v", request)
 if err := CreateEntry(client, request); err != nil {
-glog.V(0).Infof("mkdir %v: %v", request, err)
+log.Infof("mkdir %v: %v", request, err)
 return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
 }
@@ -204,9 +204,9 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string
 Entry: entry,
 }
-glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+log.Debugf("create file: %s/%s", parentDirectoryPath, fileName)
 if err := CreateEntry(client, request); err != nil {
-glog.V(0).Infof("create file %v:%v", request, err)
+log.Infof("create file %v:%v", request, err)
 return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
 }

10
weed/pb/filer_pb/filer_pb_helper.go

@@ -6,7 +6,7 @@ import (
     "fmt"
     "strings"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
     "github.com/golang/protobuf/proto"
     "github.com/viant/ptrie"
@@ -88,11 +88,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
 func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
     resp, err := client.CreateEntry(context.Background(), request)
     if err != nil {
-        glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+        log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
         return fmt.Errorf("CreateEntry: %v", err)
     }
     if resp.Error != "" {
-        glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
+        log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
         return fmt.Errorf("CreateEntry : %v", resp.Error)
     }
     return nil
@@ -101,7 +101,7 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
 func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error {
     _, err := client.UpdateEntry(context.Background(), request)
     if err != nil {
-        glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
+        log.Debugf("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
         return fmt.Errorf("UpdateEntry: %v", err)
     }
     return nil
@@ -113,7 +113,7 @@ func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest
         if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
             return nil, ErrNotFound
         }
-        glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+        log.Tracef("read %s/%v: %v", request.Directory, request.Name, err)
         return nil, fmt.Errorf("LookupEntry1: %v", err)
     }
     if resp.Entry == nil {

14
weed/pb/volume_info.go

@@ -10,7 +10,7 @@ import (
     "github.com/golang/protobuf/jsonpb"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
 )
@@ -19,28 +19,28 @@ func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, e
     volumeInfo := &volume_server_pb.VolumeInfo{}

-    glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
+    log.Debugf("maybeLoadVolumeInfo checks %s", fileName)
     if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
         if !exists {
             return volumeInfo, false, nil
         }
         if !canRead {
-            glog.Warningf("can not read %s", fileName)
+            log.Warnf("can not read %s", fileName)
             return volumeInfo, false, fmt.Errorf("can not read %s", fileName)
         }
         return volumeInfo, false, nil
     }

-    glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
+    log.Debugf("maybeLoadVolumeInfo reads %s", fileName)
     tierData, readErr := ioutil.ReadFile(fileName)
     if readErr != nil {
-        glog.Warningf("fail to read %s : %v", fileName, readErr)
+        log.Warnf("fail to read %s : %v", fileName, readErr)
         return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr)
     }

-    glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
+    log.Debugf("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
     if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
-        glog.Warningf("unmarshal error: %v", err)
+        log.Warnf("unmarshal error: %v", err)
         return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err)
     }
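The Debugf calls above replace glog's V(1) verbosity. glog printed the file name and line number of each call site; a logrus-based logger recovers that with SetReportCaller. A self-contained sketch, with an illustrative file name in the message (the CallerPrettyfier trims the path so output shows just file.go:line):

    package main

    import (
        "fmt"
        "path/filepath"
        "runtime"

        "github.com/sirupsen/logrus"
    )

    func main() {
        logger := logrus.New()
        logger.SetLevel(logrus.DebugLevel) // Debugf is suppressed at the default Info level
        logger.SetReportCaller(true)
        logger.SetFormatter(&logrus.TextFormatter{
            // Shorten the reported caller to "file.go:123".
            CallerPrettyfier: func(f *runtime.Frame) (function string, file string) {
                return "", fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
            },
        })
        logger.Debugf("maybeLoadVolumeInfo checks %s", "v1.vif") // stand-in file name
    }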

6
weed/replication/repl_util/replication_utli.go

@@ -2,7 +2,7 @@ package repl_util

 import (
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -23,9 +23,9 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
             writeErr = writeFunc(data)
         })
         if err != nil {
-            glog.V(1).Infof("read from %s: %v", fileUrl, err)
+            log.Debugf("read from %s: %v", fileUrl, err)
         } else if writeErr != nil {
-            glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
+            log.Debugf("copy from %s: %v", fileUrl, writeErr)
         } else {
             break
         }

16
weed/replication/replicator.go

@@ -7,7 +7,7 @@ import (
     "google.golang.org/grpc"
     "strings"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/replication/sink"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -37,28 +37,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
         return nil
     }
     if !strings.HasPrefix(key, r.source.Dir) {
-        glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
+        log.Tracef("skipping %v outside of %v", key, r.source.Dir)
         return nil
     }

     newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
-    glog.V(3).Infof("replicate %s => %s", key, newKey)
+    log.Tracef("replicate %s => %s", key, newKey)
     key = newKey
     if message.OldEntry != nil && message.NewEntry == nil {
-        glog.V(4).Infof("deleting %v", key)
+        log.Tracef("deleting %v", key)
         return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
     }
     if message.OldEntry == nil && message.NewEntry != nil {
-        glog.V(4).Infof("creating %v", key)
+        log.Tracef("creating %v", key)
         return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
     }
     if message.OldEntry == nil && message.NewEntry == nil {
-        glog.V(0).Infof("weird message %+v", message)
+        log.Infof("weird message %+v", message)
         return nil
     }

     foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures)
     if foundExisting {
-        glog.V(4).Infof("updated %v", key)
+        log.Tracef("updated %v", key)
         return err
     }
@@ -67,7 +67,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
         return fmt.Errorf("delete old entry %v: %v", key, err)
     }

-    glog.V(4).Infof("creating missing %v", key)
+    log.Tracef("creating missing %v", key)
     return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
 }
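The Replicate hunks above reduce to a four-way dispatch on which side of the event message is present. Here is a compact, runnable restatement of just that dispatch, with a stand-in message type in place of the filer_pb types:

    package main

    import "fmt"

    type eventMessage struct {
        OldEntry, NewEntry *struct{}
    }

    // dispatch mirrors the branch order in Replicate: delete when only the
    // old entry exists, create when only the new one does, ignore an empty
    // message, and otherwise fall through to the update path.
    func dispatch(m eventMessage) string {
        switch {
        case m.OldEntry != nil && m.NewEntry == nil:
            return "delete"
        case m.OldEntry == nil && m.NewEntry != nil:
            return "create"
        case m.OldEntry == nil && m.NewEntry == nil:
            return "ignore (weird message)"
        default:
            return "update"
        }
    }

    func main() {
        e := &struct{}{}
        fmt.Println(dispatch(eventMessage{OldEntry: e}))              // delete
        fmt.Println(dispatch(eventMessage{NewEntry: e}))              // create
        fmt.Println(dispatch(eventMessage{}))                         // ignore
        fmt.Println(dispatch(eventMessage{OldEntry: e, NewEntry: e})) // update
    }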

4
weed/replication/sink/azuresink/azure_sink.go

@@ -10,7 +10,7 @@ import (
     "github.com/Azure/azure-storage-blob-go/azblob"
     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/replication/sink"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -56,7 +56,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
     // Use your Storage account's name and key to create a credential object.
     credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
     if err != nil {
-        glog.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
+        log.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
     }

     // Create a request pipeline that is used to process HTTP(S) requests and responses.

10
weed/replication/sink/filersink/fetch_write.go

@@ -8,7 +8,7 @@ import (
     "google.golang.org/grpc"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/operation"
     "github.com/chrislusf/seaweedfs/weed/pb"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@@ -82,7 +82,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
         resp, err := client.AssignVolume(context.Background(), request)
         if err != nil {
-            glog.V(0).Infof("assign volume failure %v: %v", request, err)
+            log.Infof("assign volume failure %v: %v", request, err)
             return err
         }
         if resp.Error != "" {
@@ -98,16 +98,16 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
     fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)

-    glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
+    log.Tracef("replicating %s to %s header:%+v", filename, fileUrl, header)

     // fetch data as is, regardless whether it is encrypted or not
     uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
     if err != nil {
-        glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
+        log.Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
         return "", fmt.Errorf("upload data: %v", err)
     }
     if uploadResult.Error != "" {
-        glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
+        log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
         return "", fmt.Errorf("upload result: %v", uploadResult.Error)
     }

28
weed/replication/sink/filersink/filer_sink.go

@@ -9,7 +9,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/security"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/replication/sink"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -68,10 +68,10 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
     dir, name := util.FullPath(key).DirAndName()

-    glog.V(4).Infof("delete entry: %v", key)
+    log.Tracef("delete entry: %v", key)
     err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
     if err != nil {
-        glog.V(0).Infof("delete entry %s: %v", key, err)
+        log.Infof("delete entry %s: %v", key, err)
         return fmt.Errorf("delete entry %s: %v", key, err)
     }
     return nil
@@ -88,10 +88,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
             Directory: dir,
             Name: name,
         }
-        glog.V(1).Infof("lookup: %v", lookupRequest)
+        log.Debugf("lookup: %v", lookupRequest)
         if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
             if filer.ETag(resp.Entry) == filer.ETag(entry) {
-                glog.V(3).Infof("already replicated %s", key)
+                log.Tracef("already replicated %s", key)
                 return nil
             }
         }
@@ -100,10 +100,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
         if err != nil {
             // only warning here since the source chunk may have been deleted already
-            glog.Warningf("replicate entry chunks %s: %v", key, err)
+            log.Warnf("replicate entry chunks %s: %v", key, err)
         }

-        glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
+        log.Tracef("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)

         request := &filer_pb.CreateEntryRequest{
             Directory: dir,
@@ -117,9 +117,9 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
             Signatures: signatures,
         }

-        glog.V(3).Infof("create: %v", request)
+        log.Tracef("create: %v", request)
         if err := filer_pb.CreateEntry(client, request); err != nil {
-            glog.V(0).Infof("create entry %s: %v", key, err)
+            log.Infof("create entry %s: %v", key, err)
             return fmt.Errorf("create entry %s: %v", key, err)
         }
@@ -140,10 +140,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
             Name: name,
         }

-        glog.V(4).Infof("lookup entry: %v", request)
+        log.Tracef("lookup entry: %v", request)
         resp, err := filer_pb.LookupEntry(client, request)
         if err != nil {
-            glog.V(0).Infof("lookup %s: %v", key, err)
+            log.Infof("lookup %s: %v", key, err)
             return err
         }
@@ -156,16 +156,16 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
         return false, fmt.Errorf("lookup %s: %v", key, err)
     }

-    glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
+    log.Tracef("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)

     if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
         // skip if already changed
         // this usually happens when the messages are not ordered
-        glog.V(2).Infof("late updates %s", key)
+        log.Debugf("late updates %s", key)
     } else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
         // skip if no change
         // this usually happens when retrying the replication
-        glog.V(3).Infof("already replicated %s", key)
+        log.Tracef("already replicated %s", key)
     } else {
         // find out what changed
         deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
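The final hunk of this file ends in a three-way branch deciding whether an incoming update is stale, a no-op retry, or a real change. A condensed, runnable restatement of that decision follows, with simplified stand-in types for the filer entries and the ETag helper:

    package main

    import "fmt"

    type entry struct {
        Mtime int64
        ETag  string
    }

    // classify mirrors the branch order above: a lower incoming mtime means
    // the update arrived late, an equal ETag means it was already replicated,
    // and anything else is a real change that needs a chunk diff.
    func classify(existing, incoming entry) string {
        switch {
        case existing.Mtime > incoming.Mtime:
            return "late update: skip, messages are not ordered"
        case existing.ETag == incoming.ETag:
            return "already replicated: skip, likely a retry"
        default:
            return "changed: compare chunks and replicate the delta"
        }
    }

    func main() {
        fmt.Println(classify(entry{Mtime: 2, ETag: "a"}, entry{Mtime: 1, ETag: "b"}))
        fmt.Println(classify(entry{Mtime: 1, ETag: "a"}, entry{Mtime: 2, ETag: "a"}))
        fmt.Println(classify(entry{Mtime: 1, ETag: "a"}, entry{Mtime: 2, ETag: "b"}))
    }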

6
weed/replication/sink/gcssink/gcs_sink.go

@@ -10,7 +10,7 @@ import (
     "google.golang.org/api/option"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/replication/sink"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -57,12 +57,12 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
         var found bool
         google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
         if !found {
-            glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
+            log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
         }
     }

     client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
     if err != nil {
-        glog.Fatalf("Failed to create client: %v", err)
+        log.Fatalf("Failed to create client: %v", err)
     }

     g.client = client

10
weed/replication/sink/s3sink/s3_sink.go

@@ -13,7 +13,7 @@ import (
     "github.com/aws/aws-sdk-go/service/s3/s3iface"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/replication/sink"
     "github.com/chrislusf/seaweedfs/weed/replication/source"
@@ -42,10 +42,10 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
 }

 func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
-    glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
-    glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
-    glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
-    glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
+    log.Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
+    log.Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
+    log.Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
+    log.Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
     return s3sink.initialize(
         configuration.GetString(prefix+"aws_access_key_id"),
         configuration.GetString(prefix+"aws_secret_access_key"),

34
weed/replication/sink/s3sink/s3_write.go

@@ -10,7 +10,7 @@ import (
     "github.com/aws/aws-sdk-go/aws/awserr"
     "github.com/aws/aws-sdk-go/service/s3"

     "github.com/chrislusf/seaweedfs/weed/filer"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -24,9 +24,9 @@ func (s3sink *S3Sink) deleteObject(key string) error {
     result, err := s3sink.conn.DeleteObject(input)

     if err == nil {
-        glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
+        log.Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
     } else {
-        glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
+        log.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
     }

     return err
@@ -43,9 +43,9 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) (
     result, err := s3sink.conn.CreateMultipartUpload(input)

     if err == nil {
-        glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
+        log.Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
     } else {
-        glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
+        log.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
         return "", err
     }
@@ -64,19 +64,19 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
         if aerr, ok := err.(awserr.Error); ok {
             switch aerr.Code() {
             case s3.ErrCodeNoSuchUpload:
-                glog.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
+                log.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
             default:
-                glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
+                log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
             }
         } else {
             // Print the error, cast err to awserr.Error to get the Code and
             // Message from an error.
-            glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
+            log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
         }
         return err
     }

-    glog.V(0).Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)
+    log.Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)

     return nil
 }
@@ -94,9 +94,9 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
     result, err := s3sink.conn.CompleteMultipartUpload(input)

     if err == nil {
-        glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
+        log.Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
     } else {
-        glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
+        log.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
     }

     return err
@@ -108,7 +108,7 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
     readSeeker, err := s3sink.buildReadSeeker(chunk)
     if err != nil {
-        glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
+        log.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
         return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
     }
@@ -122,9 +122,9 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
     result, err := s3sink.conn.UploadPart(input)
     if err == nil {
-        glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
+        log.Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
     } else {
-        glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
+        log.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
     }

     part := &s3.CompletedPart{
@@ -148,9 +148,9 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
     result, err := s3sink.conn.UploadPartCopy(input)
     if err == nil {
-        glog.V(0).Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
+        log.Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
     } else {
-        glog.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
+        log.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
     }

     return err
@@ -165,7 +165,7 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, er
     for _, fileUrl := range fileUrls {
         _, err = util.ReadUrl(fileUrl+"?readDeleted=true", nil, false, false, chunk.Offset, int(chunk.Size), buf)
         if err != nil {
-            glog.V(1).Infof("read from %s: %v", fileUrl, err)
+            log.Debugf("read from %s: %v", fileUrl, err)
         } else {
             break
         }
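buildReadSeeker above, like CopyFromChunkViews and ReadPart in the neighboring files, uses one retry idiom: attempt each replica URL in turn, log the failure at Debug level, and stop at the first success. A self-contained sketch of that loop; the function name, URLs, and fetch callback are illustrative stand-ins, not names from the repo:

    package main

    import (
        "errors"
        "fmt"
    )

    // readFirstAvailable tries each replica URL in order and returns on the
    // first success, remembering the last failure for the error path.
    func readFirstAvailable(fileUrls []string, fetch func(string) error) error {
        var lastErr error
        for _, fileUrl := range fileUrls {
            if err := fetch(fileUrl); err != nil {
                fmt.Printf("read from %s: %v\n", fileUrl, err) // the diff logs this at Debug
                lastErr = err
                continue
            }
            return nil
        }
        if lastErr == nil {
            lastErr = errors.New("no replica urls to read")
        }
        return lastErr
    }

    func main() {
        urls := []string{"http://replica-a/3,01637037d6", "http://replica-b/3,01637037d6"}
        err := readFirstAvailable(urls, func(u string) error { return errors.New("unreachable") })
        fmt.Println(err)
    }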

10
weed/replication/source/filer_source.go

@@ -12,7 +12,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/pb"
     "github.com/chrislusf/seaweedfs/weed/security"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -49,7 +49,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
     err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

-        glog.V(4).Infof("read lookup volume id locations: %v", vid)
+        log.Tracef("read lookup volume id locations: %v", vid)
         resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
             VolumeIds: []string{vid},
         })
@@ -63,14 +63,14 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
     })

     if err != nil {
-        glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err)
+        log.Debugf("LookupFileId volume id %s: %v", vid, err)
         return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
     }

     locations := vid2Locations[vid]

     if locations == nil || len(locations.Locations) == 0 {
-        glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err)
+        log.Debugf("LookupFileId locate volume id %s: %v", vid, err)
         return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
     }
@@ -91,7 +91,7 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
     for _, fileUrl := range fileUrls {
         filename, header, resp, err = util.DownloadFile(fileUrl)
         if err != nil {
-            glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
+            log.Debugf("fail to read from %s: %v", fileUrl, err)
         } else {
             break
         }

8
weed/replication/sub/notification_aws_sqs.go

@@ -8,7 +8,7 @@ import (
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/session"
     "github.com/aws/aws-sdk-go/service/sqs"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *AwsSqsInput) GetName() string {
 }

 func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error {
-    glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
-    glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
+    log.Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
+    log.Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
     return k.initialize(
         configuration.GetString(prefix+"aws_access_key_id"),
         configuration.GetString(prefix+"aws_secret_access_key"),
@@ -106,7 +106,7 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
     })
     if err != nil {
-        glog.V(1).Infof("delete message from sqs %s: %v", k.queueUrl, err)
+        log.Debugf("delete message from sqs %s: %v", k.queueUrl, err)
     }

     return

4
weed/replication/sub/notification_gocdk_pub_sub.go

@@ -3,7 +3,7 @@ package sub
 import (
     "context"

-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/golang/protobuf/proto"
@@ -29,7 +29,7 @@ func (k *GoCDKPubSubInput) GetName() string {
 func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
     subURL := configuration.GetString(prefix + "sub_url")
-    glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
+    log.Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
     sub, err := pubsub.OpenSubscription(context.Background(), subURL)
     if err != nil {
         return err

18
weed/replication/sub/notification_google_pub_sub.go

@@ -6,7 +6,7 @@ import (
     "os"

     "cloud.google.com/go/pubsub"
-    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util/log"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
     "github.com/chrislusf/seaweedfs/weed/util"
     "github.com/golang/protobuf/proto"
@@ -28,8 +28,8 @@ func (k *GooglePubSubInput) GetName() string {
 }

 func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error {
-    glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
-    glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
+    log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
+    log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
     return k.initialize(
         configuration.GetString(prefix+"google_application_credentials"),
         configuration.GetString(prefix+"project_id"),
@@ -45,13 +45,13 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
         var found bool
         google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
         if !found {
-            glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
+            log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
         }
     }

     client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
     if err != nil {
-        glog.Fatalf("Failed to create client: %v", err)
+        log.Fatalf("Failed to create client: %v", err)
     }

     k.topicName = topicName
@@ -60,11 +60,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
         if !exists {
             topic, err = client.CreateTopic(ctx, topicName)
             if err != nil {
-                glog.Fatalf("Failed to create topic %s: %v", topicName, err)
+                log.Fatalf("Failed to create topic %s: %v", topicName, err)
             }
         }
     } else {
-        glog.Fatalf("Failed to check topic %s: %v", topicName, err)
+        log.Fatalf("Failed to check topic %s: %v", topicName, err)
     }

     subscriptionName := "seaweedfs_sub"
@@ -74,11 +74,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
         if !exists {
             k.sub, err = client.CreateSubscription(ctx, subscriptionName, pubsub.SubscriptionConfig{Topic: topic})
             if err != nil {
-                glog.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
+                log.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
             }
         }
     } else {
-        glog.Fatalf("Failed to check subscription %s: %v", topicName, err)
+        log.Fatalf("Failed to check subscription %s: %v", topicName, err)
     }

     k.messageChan = make(chan *pubsub.Message, 1)
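One caveat on the Fatalf conversions in these initialize functions: logrus's Fatalf, like glog's, logs and then terminates the process (logrus runs any registered exit handlers and then calls os.Exit(1)), so failing fast on bad configuration behaves the same after the switch. A contrived demonstration:

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        credentials := "" // contrived: pretend the required env variable is missing
        if credentials == "" {
            // Logs at fatal level, then exits with status 1.
            logrus.Fatalf("need to specify GOOGLE_APPLICATION_CREDENTIALS env variable")
        }
    }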

Some files were not shown because too many files changed in this diff
