Browse Source

Clean up log/fmt usage: move to the log package for important data changes and warnings.
pull/2/head
Chris Lu 12 years ago
parent
commit
ac15868694
  1. 7
      go/replication/volume_growth.go
  2. 10
      go/storage/needle.go
  3. 3
      go/storage/needle_read_write.go
  4. 4
      go/storage/volume.go
  5. 6
      go/topology/node.go
  6. 4
      go/topology/node_list.go
  7. 5
      go/topology/topology.go
  8. 22
      go/topology/topology_compact.go
  9. 8
      go/topology/topology_event_handling.go
  10. 10
      go/topology/volume_layout.go

7
go/replication/volume_growth.go

@@ -6,6 +6,7 @@ import (
"code.google.com/p/weed-fs/go/topology" "code.google.com/p/weed-fs/go/topology"
"errors" "errors"
"fmt" "fmt"
"log"
"math/rand" "math/rand"
"sync" "sync"
) )
@@ -106,7 +107,6 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
nl := topology.NewNodeList(topo.Children(), nil) nl := topology.NewNodeList(topo.Children(), nil)
picked, ret := nl.RandomlyPickN(2, 1, dataCenter) picked, ret := nl.RandomlyPickN(2, 1, dataCenter)
vid := topo.NextVolumeId() vid := topo.NextVolumeId()
println("growing on picked servers", picked)
if ret { if ret {
var servers []*topology.DataNode var servers []*topology.DataNode
for _, n := range picked { for _, n := range picked {
@@ -116,7 +116,6 @@ func (vg *VolumeGrowth) GrowByCountAndType(count int, repType storage.Replicatio
} }
} }
} }
println("growing on servers", servers)
if len(servers) == 2 { if len(servers) == 2 {
if err = vg.grow(topo, vid, repType, servers...); err == nil { if err = vg.grow(topo, vid, repType, servers...); err == nil {
counter++ counter++
@@ -193,9 +192,9 @@ func (vg *VolumeGrowth) grow(topo *topology.Topology, vid storage.VolumeId, repT
vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion} vi := storage.VolumeInfo{Id: vid, Size: 0, RepType: repType, Version: storage.CurrentVersion}
server.AddOrUpdateVolume(vi) server.AddOrUpdateVolume(vi)
topo.RegisterVolumeLayout(&vi, server) topo.RegisterVolumeLayout(&vi, server)
fmt.Println("Created Volume", vid, "on", server)
log.Println("Created Volume", vid, "on", server)
} else { } else {
fmt.Println("Failed to assign", vid, "to", servers, "error", err)
log.Println("Failed to assign", vid, "to", servers, "error", err)
return errors.New("Failed to assign " + vid.String()) return errors.New("Failed to assign " + vid.String())
} }
} }

10
go/storage/needle.go

@@ -4,7 +4,7 @@ import (
"code.google.com/p/weed-fs/go/util" "code.google.com/p/weed-fs/go/util"
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt"
"log"
"io/ioutil" "io/ioutil"
"mime" "mime"
"net/http" "net/http"
@@ -43,13 +43,13 @@ func NewNeedle(r *http.Request) (n *Needle, e error) {
n = new(Needle) n = new(Needle)
form, fe := r.MultipartReader() form, fe := r.MultipartReader()
if fe != nil { if fe != nil {
fmt.Println("MultipartReader [ERROR]", fe)
log.Println("MultipartReader [ERROR]", fe)
e = fe e = fe
return return
} }
part, fe := form.NextPart() part, fe := form.NextPart()
if fe != nil { if fe != nil {
fmt.Println("Reading Multi part [ERROR]", fe)
log.Println("Reading Multi part [ERROR]", fe)
e = fe e = fe
return return
} }
@@ -114,7 +114,7 @@ func (n *Needle) ParsePath(fid string) {
length := len(fid) length := len(fid)
if length <= 8 { if length <= 8 {
if length > 0 { if length > 0 {
println("Invalid fid", fid, "length", length)
log.Println("Invalid fid", fid, "length", length)
} }
return return
} }
@@ -136,7 +136,7 @@ func ParseKeyHash(key_hash_string string) (uint64, uint32) {
key_hash_bytes, khe := hex.DecodeString(key_hash_string) key_hash_bytes, khe := hex.DecodeString(key_hash_string)
key_hash_len := len(key_hash_bytes) key_hash_len := len(key_hash_bytes)
if khe != nil || key_hash_len <= 4 { if khe != nil || key_hash_len <= 4 {
println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
log.Println("Invalid key_hash", key_hash_string, "length:", key_hash_len, "error", khe)
return 0, 0 return 0, 0
} }
key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4]) key := util.BytesToUint64(key_hash_bytes[0 : key_hash_len-4])

3
go/storage/needle_read_write.go

@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log"
"os" "os"
) )
@@ -26,7 +27,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) {
defer func(s io.Seeker, off int64) { defer func(s io.Seeker, off int64) {
if err != nil { if err != nil {
if _, e = s.Seek(off, 0); e != nil { if _, e = s.Seek(off, 0); e != nil {
fmt.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
log.Printf("Failed to seek %s back to %d with error: %s\n", w, off, e)
} }
} }
}(s, end) }(s, end)

4
go/storage/volume.go

@@ -108,7 +108,7 @@ func (v *Volume) Size() int64 {
if e == nil { if e == nil {
return stat.Size() return stat.Size()
} }
fmt.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
log.Printf("Failed to read file size %s %s\n", v.dataFile.Name(), e.Error())
return -1 return -1
} }
func (v *Volume) Close() { func (v *Volume) Close() {
@@ -120,7 +120,7 @@ func (v *Volume) Close() {
func (v *Volume) maybeWriteSuperBlock() error { func (v *Volume) maybeWriteSuperBlock() error {
stat, e := v.dataFile.Stat() stat, e := v.dataFile.Stat()
if e != nil { if e != nil {
fmt.Printf("failed to stat datafile %s: %s", v.dataFile, e)
log.Printf("failed to stat datafile %s: %s", v.dataFile, e)
return e return e
} }
if stat.Size() == 0 { if stat.Size() == 0 {

6
go/topology/node.go

@@ -2,7 +2,7 @@ package topology
import ( import (
"code.google.com/p/weed-fs/go/storage" "code.google.com/p/weed-fs/go/storage"
"fmt"
"log"
) )
type NodeId string type NodeId string
@@ -155,7 +155,7 @@ func (n *NodeImpl) LinkChildNode(node Node) {
n.UpAdjustVolumeCountDelta(node.GetVolumeCount()) n.UpAdjustVolumeCountDelta(node.GetVolumeCount())
n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount()) n.UpAdjustActiveVolumeCountDelta(node.GetActiveVolumeCount())
node.SetParent(n) node.SetParent(n)
fmt.Println(n, "adds child", node.Id())
log.Println(n, "adds child", node.Id())
} }
} }
@@ -167,7 +167,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
n.UpAdjustVolumeCountDelta(-node.GetVolumeCount()) n.UpAdjustVolumeCountDelta(-node.GetVolumeCount())
n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount()) n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount())
n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount()) n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount())
fmt.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
log.Println(n, "removes", node, "volumeCount =", n.activeVolumeCount)
} }
} }

4
go/topology/node_list.go

@@ -2,7 +2,7 @@ package topology
import ( import (
"code.google.com/p/weed-fs/go/storage" "code.google.com/p/weed-fs/go/storage"
"fmt"
"log"
"math/rand" "math/rand"
) )
@@ -70,7 +70,7 @@ func (nl *NodeList) ReserveOneVolume(randomVolumeIndex int, vid storage.VolumeId
randomVolumeIndex -= freeSpace randomVolumeIndex -= freeSpace
} else { } else {
if node.IsDataNode() && node.FreeSpace() > 0 { if node.IsDataNode() && node.FreeSpace() > 0 {
fmt.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
log.Println("vid =", vid, " assigned to node =", node, ", freeSpace =", node.FreeSpace())
return true, node.(*DataNode) return true, node.(*DataNode)
} }
children := node.Children() children := node.Children()

5
go/topology/topology.go

@@ -4,7 +4,6 @@ import (
"code.google.com/p/weed-fs/go/sequence" "code.google.com/p/weed-fs/go/sequence"
"code.google.com/p/weed-fs/go/storage" "code.google.com/p/weed-fs/go/storage"
"errors" "errors"
"fmt"
"io/ioutil" "io/ioutil"
"log" "log"
"math/rand" "math/rand"
@@ -74,7 +73,7 @@ func (t *Topology) Lookup(vid storage.VolumeId) []*DataNode {
func (t *Topology) RandomlyReserveOneVolume(dataCenter string) (bool, *DataNode, *storage.VolumeId) { func (t *Topology) RandomlyReserveOneVolume(dataCenter string) (bool, *DataNode, *storage.VolumeId) {
if t.FreeSpace() <= 0 { if t.FreeSpace() <= 0 {
fmt.Println("Topology does not have free space left!")
log.Println("Topology does not have free space left!")
return false, nil, nil return false, nil, nil
} }
vid := t.NextVolumeId() vid := t.NextVolumeId()
@@ -103,7 +102,7 @@ func (t *Topology) PickForWrite(repType storage.ReplicationType, count int, data
func (t *Topology) GetVolumeLayout(repType storage.ReplicationType) *VolumeLayout { func (t *Topology) GetVolumeLayout(repType storage.ReplicationType) *VolumeLayout {
replicationTypeIndex := repType.GetReplicationLevelIndex() replicationTypeIndex := repType.GetReplicationLevelIndex()
if t.replicaType2VolumeLayout[replicationTypeIndex] == nil { if t.replicaType2VolumeLayout[replicationTypeIndex] == nil {
fmt.Println("adding replication type", repType)
log.Println("adding replication type", repType)
t.replicaType2VolumeLayout[replicationTypeIndex] = NewVolumeLayout(repType, t.volumeSizeLimit, t.pulse) t.replicaType2VolumeLayout[replicationTypeIndex] = NewVolumeLayout(repType, t.volumeSizeLimit, t.pulse)
} }
return t.replicaType2VolumeLayout[replicationTypeIndex] return t.replicaType2VolumeLayout[replicationTypeIndex]

22
go/topology/topology_compact.go

@@ -5,7 +5,7 @@ import (
"code.google.com/p/weed-fs/go/util" "code.google.com/p/weed-fs/go/util"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"log"
"net/url" "net/url"
"time" "time"
) )
@@ -14,12 +14,12 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
ch := make(chan bool, locationlist.Length()) ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list { for index, dn := range locationlist.list {
go func(index int, url string, vid storage.VolumeId) { go func(index int, url string, vid storage.VolumeId) {
//fmt.Println(index, "Check vacuuming", vid, "on", dn.Url())
//log.Println(index, "Check vacuuming", vid, "on", dn.Url())
if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil { if e, ret := vacuumVolume_Check(url, vid, garbageThreshold); e != nil {
//fmt.Println(index, "Error when checking vacuuming", vid, "on", url, e)
//log.Println(index, "Error when checking vacuuming", vid, "on", url, e)
ch <- false ch <- false
} else { } else {
//fmt.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
//log.Println(index, "Checked vacuuming", vid, "on", url, "needVacuum", ret)
ch <- ret ch <- ret
} }
}(index, dn.Url(), vid) }(index, dn.Url(), vid)
@@ -41,12 +41,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
ch := make(chan bool, locationlist.Length()) ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list { for index, dn := range locationlist.list {
go func(index int, url string, vid storage.VolumeId) { go func(index int, url string, vid storage.VolumeId) {
fmt.Println(index, "Start vacuuming", vid, "on", url)
log.Println(index, "Start vacuuming", vid, "on", url)
if e := vacuumVolume_Compact(url, vid); e != nil { if e := vacuumVolume_Compact(url, vid); e != nil {
fmt.Println(index, "Error when vacuuming", vid, "on", url, e)
log.Println(index, "Error when vacuuming", vid, "on", url, e)
ch <- false ch <- false
} else { } else {
fmt.Println(index, "Complete vacuuming", vid, "on", url)
log.Println(index, "Complete vacuuming", vid, "on", url)
ch <- true ch <- true
} }
}(index, dn.Url(), vid) }(index, dn.Url(), vid)
@@ -65,12 +65,12 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool { func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
isCommitSuccess := true isCommitSuccess := true
for _, dn := range locationlist.list { for _, dn := range locationlist.list {
fmt.Println("Start Commiting vacuum", vid, "on", dn.Url())
log.Println("Start Commiting vacuum", vid, "on", dn.Url())
if e := vacuumVolume_Commit(dn.Url(), vid); e != nil { if e := vacuumVolume_Commit(dn.Url(), vid); e != nil {
fmt.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
log.Println("Error when committing vacuum", vid, "on", dn.Url(), e)
isCommitSuccess = false isCommitSuccess = false
} else { } else {
fmt.Println("Complete Commiting vacuum", vid, "on", dn.Url())
log.Println("Complete Commiting vacuum", vid, "on", dn.Url())
} }
} }
if isCommitSuccess { if isCommitSuccess {
@@ -104,7 +104,7 @@ func vacuumVolume_Check(urlLocation string, vid storage.VolumeId, garbageThresho
values.Add("garbageThreshold", garbageThreshold) values.Add("garbageThreshold", garbageThreshold)
jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values) jsonBlob, err := util.Post("http://"+urlLocation+"/admin/vacuum_volume_check", values)
if err != nil { if err != nil {
fmt.Println("parameters:", values)
log.Println("parameters:", values)
return err, false return err, false
} }
var ret VacuumVolumeResult var ret VacuumVolumeResult

8
go/topology/topology_event_handling.go

@@ -2,7 +2,7 @@ package topology
import ( import (
"code.google.com/p/weed-fs/go/storage" "code.google.com/p/weed-fs/go/storage"
"fmt"
"log"
"math/rand" "math/rand"
"time" "time"
) )
@@ -28,10 +28,10 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) {
t.SetVolumeCapacityFull(v) t.SetVolumeCapacityFull(v)
case dn := <-t.chanRecoveredDataNodes: case dn := <-t.chanRecoveredDataNodes:
t.RegisterRecoveredDataNode(dn) t.RegisterRecoveredDataNode(dn)
fmt.Println("DataNode", dn, "is back alive!")
log.Println("DataNode", dn, "is back alive!")
case dn := <-t.chanDeadDataNodes: case dn := <-t.chanDeadDataNodes:
t.UnRegisterDataNode(dn) t.UnRegisterDataNode(dn)
fmt.Println("DataNode", dn, "is dead!")
log.Println("DataNode", dn, "is dead!")
} }
} }
}() }()
@@ -48,7 +48,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
} }
func (t *Topology) UnRegisterDataNode(dn *DataNode) { func (t *Topology) UnRegisterDataNode(dn *DataNode) {
for _, v := range dn.volumes { for _, v := range dn.volumes {
fmt.Println("Removing Volume", v.Id, "from the dead volume server", dn)
log.Println("Removing Volume", v.Id, "from the dead volume server", dn)
vl := t.GetVolumeLayout(v.RepType) vl := t.GetVolumeLayout(v.RepType)
vl.SetVolumeUnavailable(dn, v.Id) vl.SetVolumeUnavailable(dn, v.Id)
} }

10
go/topology/volume_layout.go

@@ -3,7 +3,7 @@ package topology
import ( import (
"code.google.com/p/weed-fs/go/storage" "code.google.com/p/weed-fs/go/storage"
"errors" "errors"
"fmt"
"log"
"math/rand" "math/rand"
) )
@@ -54,7 +54,7 @@ func (vl *VolumeLayout) Lookup(vid storage.VolumeId) []*DataNode {
func (vl *VolumeLayout) PickForWrite(count int, dataCenter string) (*storage.VolumeId, int, *VolumeLocationList, error) { func (vl *VolumeLayout) PickForWrite(count int, dataCenter string) (*storage.VolumeId, int, *VolumeLocationList, error) {
len_writers := len(vl.writables) len_writers := len(vl.writables)
if len_writers <= 0 { if len_writers <= 0 {
fmt.Println("No more writable volumes!")
log.Println("No more writable volumes!")
return nil, 0, nil, errors.New("No more writable volumes!") return nil, 0, nil, errors.New("No more writable volumes!")
} }
if dataCenter == "" { if dataCenter == "" {
@@ -102,7 +102,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(dataCenter string) int {
func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool { func (vl *VolumeLayout) removeFromWritable(vid storage.VolumeId) bool {
for i, v := range vl.writables { for i, v := range vl.writables {
if v == vid { if v == vid {
fmt.Println("Volume", vid, "becomes unwritable")
log.Println("Volume", vid, "becomes unwritable")
vl.writables = append(vl.writables[:i], vl.writables[i+1:]...) vl.writables = append(vl.writables[:i], vl.writables[i+1:]...)
return true return true
} }
@@ -115,7 +115,7 @@ func (vl *VolumeLayout) setVolumeWritable(vid storage.VolumeId) bool {
return false return false
} }
} }
fmt.Println("Volume", vid, "becomes writable")
log.Println("Volume", vid, "becomes writable")
vl.writables = append(vl.writables, vid) vl.writables = append(vl.writables, vid)
return true return true
} }
@@ -123,7 +123,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool {
func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool { func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid storage.VolumeId) bool {
if vl.vid2location[vid].Remove(dn) { if vl.vid2location[vid].Remove(dn) {
if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() { if vl.vid2location[vid].Length() < vl.repType.GetCopyCount() {
fmt.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
log.Println("Volume", vid, "has", vl.vid2location[vid].Length(), "replica, less than required", vl.repType.GetCopyCount())
return vl.removeFromWritable(vid) return vl.removeFromWritable(vid)
} }
} }

Loading…
Cancel
Save