Browse Source

go fmt

pull/2174/head
Chris Lu 4 years ago
parent
commit
b624090398
  1. 4
      weed/command/fuse.go
  2. 2
      weed/command/upload.go
  3. 2
      weed/filesys/dirty_pages_temp_file.go
  4. 2
      weed/filesys/dirty_pages_temp_interval.go
  5. 2
      weed/filesys/file.go
  6. 2
      weed/server/filer_server.go
  7. 2
      weed/server/master_server.go
  8. 2
      weed/server/master_server_handlers.go
  9. 2
      weed/server/volume_server_handlers_read.go
  10. 8
      weed/storage/needle/needle_read_write.go
  11. 2
      weed/topology/node.go
  12. 8
      weed/util/grace/pprof.go

4
weed/command/fuse.go

@@ -2,10 +2,10 @@ package command
import ( import (
"fmt" "fmt"
"strings"
"os"
"strconv" "strconv"
"strings"
"time" "time"
"os"
) )
func init() { func init() {

2
weed/command/upload.go

@@ -110,7 +110,7 @@ func runUpload(cmd *Command, args []string) bool {
}) })
if err != nil { if err != nil {
fmt.Println(err.Error()) fmt.Println(err.Error())
return false;
return false
} }
} else { } else {
parts, e := operation.NewFileParts(args) parts, e := operation.NewFileParts(args)

2
weed/filesys/dirty_pages_temp_file.go

@@ -97,7 +97,7 @@ func (pages *TempFileDirtyPages) saveExistingPagesToStorage() {
for _, list := range pages.writtenIntervals.lists { for _, list := range pages.writtenIntervals.lists {
listStopOffset := list.Offset() + list.Size() listStopOffset := list.Offset() + list.Size()
for uploadedOffset:=int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
for uploadedOffset := int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
start, stop := max(list.Offset(), uploadedOffset), min(listStopOffset, uploadedOffset+pageSize) start, stop := max(list.Offset(), uploadedOffset), min(listStopOffset, uploadedOffset+pageSize)
if start >= stop { if start >= stop {
continue continue

2
weed/filesys/dirty_pages_temp_interval.go

@@ -54,7 +54,7 @@ func (list *WrittenIntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.DataOffset), min(stop, t.DataOffset+t.Size) nodeStart, nodeStop := max(start, t.DataOffset), min(stop, t.DataOffset+t.Size)
if nodeStart < nodeStop { if nodeStart < nodeStop {
// glog.V(4).Infof("copying start=%d stop=%d t=[%d,%d) => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.DataOffset, t.DataOffset+t.Size, len(buf), nodeStart, nodeStop) // glog.V(4).Infof("copying start=%d stop=%d t=[%d,%d) => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.DataOffset, t.DataOffset+t.Size, len(buf), nodeStart, nodeStop)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset + nodeStart - t.DataOffset)
list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset+nodeStart-t.DataOffset)
} }
if t.Next == nil { if t.Next == nil {

2
weed/filesys/file.go

@@ -144,7 +144,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true file.dirtyMetadata = true
} }
if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode){
if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
entry.Attributes.FileMode = uint32(req.Mode) entry.Attributes.FileMode = uint32(req.Mode)
file.dirtyMetadata = true file.dirtyMetadata = true
} }

2
weed/server/filer_server.go

@@ -30,11 +30,11 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis" _ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2" _ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"

2
weed/server/master_server.go

@@ -97,7 +97,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
ms := &MasterServer{ ms := &MasterServer{
option: option, option: option,
preallocateSize: preallocateSize, preallocateSize: preallocateSize,
vgCh: make(chan *topology.VolumeGrowRequest, 1 << 6),
vgCh: make(chan *topology.VolumeGrowRequest, 1<<6),
clientChans: make(map[string]chan *master_pb.VolumeLocation), clientChans: make(map[string]chan *master_pb.VolumeLocation),
grpcDialOption: grpcDialOption, grpcDialOption: grpcDialOption,
MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers), MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers),

2
weed/server/master_server_handlers.go

@@ -123,7 +123,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
Count: writableVolumeCount, Count: writableVolumeCount,
ErrCh: errCh, ErrCh: errCh,
} }
if err := <- errCh; err != nil {
if err := <-errCh; err != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("cannot grow volume group! %v", err)) writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("cannot grow volume group! %v", err))
return return
} }

2
weed/server/volume_server_handlers_read.go

@@ -65,7 +65,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
} }
lookupResult, err := operation.Lookup(vs.GetMaster, volumeId.String()) lookupResult, err := operation.Lookup(vs.GetMaster, volumeId.String())
glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err) glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
if err != nil || len(lookupResult.Locations) <= 0{
if err != nil || len(lookupResult.Locations) <= 0 {
glog.V(0).Infoln("lookup error:", err, r.URL.Path) glog.V(0).Infoln("lookup error:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound) w.WriteHeader(http.StatusNotFound)
return return

8
weed/storage/needle/needle_read_write.go

@@ -52,7 +52,7 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
writeBytes.Write(n.Data) writeBytes.Write(n.Data)
padding := PaddingLength(n.Size, version) padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value()) util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
writeBytes.Write(header[0:NeedleChecksumSize+padding])
writeBytes.Write(header[0 : NeedleChecksumSize+padding])
return size, actualSize, nil return size, actualSize, nil
case Version2, Version3: case Version2, Version3:
header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
@@ -104,7 +104,7 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
} }
if n.HasLastModifiedDate() { if n.HasLastModifiedDate() {
util.Uint64toBytes(header[0:8], n.LastModified) util.Uint64toBytes(header[0:8], n.LastModified)
writeBytes.Write(header[8-LastModifiedBytesLength:8])
writeBytes.Write(header[8-LastModifiedBytesLength : 8])
} }
if n.HasTtl() && n.Ttl != nil { if n.HasTtl() && n.Ttl != nil {
n.Ttl.ToBytes(header[0:TtlBytesLength]) n.Ttl.ToBytes(header[0:TtlBytesLength])
@@ -119,11 +119,11 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
padding := PaddingLength(n.Size, version) padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value()) util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
if version == Version2 { if version == Version2 {
writeBytes.Write(header[0:NeedleChecksumSize+padding])
writeBytes.Write(header[0 : NeedleChecksumSize+padding])
} else { } else {
// version3 // version3
util.Uint64toBytes(header[NeedleChecksumSize:NeedleChecksumSize+TimestampSize], n.AppendAtNs) util.Uint64toBytes(header[NeedleChecksumSize:NeedleChecksumSize+TimestampSize], n.AppendAtNs)
writeBytes.Write(header[0:NeedleChecksumSize+TimestampSize+padding])
writeBytes.Write(header[0 : NeedleChecksumSize+TimestampSize+padding])
} }
return Size(n.DataSize), GetActualSize(n.Size, version), nil return Size(n.DataSize), GetActualSize(n.Size, version), nil

2
weed/topology/node.go

@@ -243,7 +243,7 @@ func (n *NodeImpl) CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSi
if v.Size >= volumeSizeLimit { if v.Size >= volumeSizeLimit {
//fmt.Println("volume",v.Id,"size",v.Size,">",volumeSizeLimit) //fmt.Println("volume",v.Id,"size",v.Size,">",volumeSizeLimit)
n.GetTopology().chanFullVolumes <- v n.GetTopology().chanFullVolumes <- v
}else if float64(v.Size) > float64(volumeSizeLimit) * growThreshold {
} else if float64(v.Size) > float64(volumeSizeLimit)*growThreshold {
n.GetTopology().chanCrowdedVolumes <- v n.GetTopology().chanCrowdedVolumes <- v
} }
} }

8
weed/util/grace/pprof.go

@@ -21,21 +21,21 @@ func SetupProfiling(cpuProfile, memProfile string) {
pprof.StopCPUProfile() pprof.StopCPUProfile()
// write block pprof // write block pprof
blockF, err := os.Create(cpuProfile+".block")
blockF, err := os.Create(cpuProfile + ".block")
if err != nil { if err != nil {
return return
} }
p := pprof.Lookup("block") p := pprof.Lookup("block")
p.WriteTo(blockF,0)
p.WriteTo(blockF, 0)
blockF.Close() blockF.Close()
// write mutex pprof // write mutex pprof
mutexF, err := os.Create(cpuProfile+".mutex")
mutexF, err := os.Create(cpuProfile + ".mutex")
if err != nil { if err != nil {
return return
} }
p = pprof.Lookup("mutex") p = pprof.Lookup("mutex")
p.WriteTo(mutexF,0)
p.WriteTo(mutexF, 0)
mutexF.Close() mutexF.Close()
}) })

Loading…
Cancel
Save