Browse Source

Merge pull request #948 from Wine93/style

Style
pull/991/head
Chris Lu 6 years ago
committed by GitHub
parent
commit
ac52db3bed
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
  1. 8
      README.md
  2. 98
      util/gostd
  3. 2
      weed/command/server.go
  4. 6
      weed/server/master_grpc_server.go
  5. 4
      weed/server/volume_grpc_client_to_master.go
  6. 1
      weed/shell/command_volume_unmount.go
  7. 1
      weed/storage/needle/needle.go
  8. 3
      weed/storage/needle/needle_read_write.go
  9. 6
      weed/storage/needle/volume_id_test.go
  10. 4
      weed/storage/volume_checking.go
  11. 6
      weed/storage/volume_info.go
  12. 4
      weed/topology/node.go
  13. 4
      weed/topology/topology_test.go
  14. 1
      weed/util/compression.go

8
README.md

@ -363,7 +363,7 @@ GlusterFS hashes the path and filename into ids, and assigned to virtual volumes
Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120) Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120)
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage.
Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews. Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews.
@ -388,6 +388,12 @@ Other key features include: Erasure Encoding, JWT security.
This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)! This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!
BTW, we suggest running the code style check script `util/gostd` before you push your branch to the remote; it will make SeaweedFS easier to review, maintain, and develop:
```
$ ./util/gostd
```
[Back to TOC](#table-of-contents) [Back to TOC](#table-of-contents)
## Installation Guide ## ## Installation Guide ##

98
util/gostd

@ -0,0 +1,98 @@
#!/usr/bin/env bash
############################ GLOBAL VARIABLES
# Default exclusion regex (-e). A single space only matches file paths
# containing a space — presumably intended as an effective no-op default.
regex=' '
# Default branch/commit (-b) that `git diff` compares against in gostd().
branch="master"
# Default line-length threshold (-l) used by the long-line report.
max_length=150
# Matches any path ending in ".go"; used to select Go files from the diff.
REGEX_SUFFIX_GO=".+\.go$"
############################ FUNCTIONS
# msg writes its argument to stderr; %b expands backslash escapes
# (e.g. \n and the \33 color sequences used by die/succ).
msg() {
    printf '%b' "${1}" 1>&2
}
# die prints a red [✘] marker followed by the two message parts to
# stderr, then aborts the whole script with exit status 1.
die() {
    msg "\33[31m[✘]\33[0m $1$2"
    exit 1
}
# succ prints a blue [√] marker followed by the two message parts to
# stderr to report a successfully checked file.
succ() {
    msg "\33[34m[√]\33[0m $1$2"
}
# gostd style-checks every .go file changed relative to a branch/commit:
# it runs `go fmt` on each file (aborting via die on the first failure)
# and then prints any lines that reach the configured length limit.
#   $1 - branch or commit that `git diff` compares against
#   $2 - regex of files/directories to exclude from checking
#   $3 - line-length threshold for the long-line report
gostd() {
    local branch=$1
    local reg4exclude=$2
    local max_length=$3
    local file error

    # Process substitution (not a pipe) keeps the loop in the current
    # shell so die() can actually terminate the script, and `read -r`
    # survives filenames containing spaces, unlike the previous
    # word-splitting `for file in \`git diff ...\`` loop.
    while IFS= read -r file
    do
        # Only inspect Go files not matched by the exclusion regex.
        if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then
            continue
        fi
        # $(...) replaces the deprecated backticks; testing the command
        # directly avoids the fragile separate `$?` check.
        if ! error=$(go fmt "$file" 2>&1); then
            die "go fmt $file:" "$error"
        fi
        succ "$file\n"
        # Report lines that reach max_length characters, indented
        # beneath the file name for readability.
        grep -n -E --color=always ".{$max_length}" "$file" | awk '{ printf ("%4s %s\n", "", $0) }'
    done < <(git diff "$branch" --name-only)
}
# get_options parses the command-line flags into the script-level
# settings used by main():
#   -b  branch/commit to diff against   -> $branch
#   -e  exclusion regex                 -> $regex
#   -l  maximum line length             -> $max_length
#   -h  print usage and exit 0; unknown flags print usage and exit 1
get_options() {
    local opt
    while getopts "b:e:hl:" opt; do
        case "$opt" in
            b)  branch=$OPTARG ;;
            e)  regex=$OPTARG ;;
            l)  max_length=$OPTARG ;;
            h)  usage
                exit 0 ;;
            \?) usage
                exit 1 ;;
        esac
    done
}
# usage prints the help text (-h, or any unknown flag) to stdout.
# The heredoc body is emitted verbatim, so its layout is intentional.
usage () {
cat << _EOC_
Usage:
gostd [options]
Options:
-b <branch/commit> Specify the git diff branch or commit.
(default: master)
-e <regex> Regex for excluding file or directory.
-h Print this usage.
-l <length> Show files that exceed the limit line length.
(default: 150)
Examples:
gostd
gostd -b master -l 100
gostd -b 59d532a -e weed/pb -l 100
_EOC_
}
# main is the entry point: parse the flags, then run the style check
# with the resulting (or default) settings.
main() {
    get_options "$@"
    gostd "$branch" "$regex" "$max_length"
}
############################ MAIN()
main "$@"

2
weed/command/server.go

@ -1,10 +1,10 @@
package command package command
import ( import (
"fmt"
"github.com/chrislusf/raft/protobuf" "github.com/chrislusf/raft/protobuf"
"github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/security"
"github.com/spf13/viper" "github.com/spf13/viper"
"fmt"
"net/http" "net/http"
"os" "os"
"runtime" "runtime"

6
weed/server/master_grpc_server.go

@ -79,10 +79,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
} }
if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 { if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {
// process delta volume ids if exists for fast volume id updates // process delta volume ids if exists for fast volume id updates
for _, volInfo := range heartbeat.NewVolumes{
for _, volInfo := range heartbeat.NewVolumes {
message.NewVids = append(message.NewVids, volInfo.Id) message.NewVids = append(message.NewVids, volInfo.Id)
} }
for _, volInfo := range heartbeat.DeletedVolumes{
for _, volInfo := range heartbeat.DeletedVolumes {
message.DeletedVids = append(message.DeletedVids, volInfo.Id) message.DeletedVids = append(message.DeletedVids, volInfo.Id)
} }
// update master internal volume layouts // update master internal volume layouts
@ -92,7 +92,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn) newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn)
for _, v := range newVolumes { for _, v := range newVolumes {
glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url())
message.NewVids = append(message.NewVids, uint32(v.Id)) message.NewVids = append(message.NewVids, uint32(v.Id))
} }
for _, v := range deletedVolumes { for _, v := range deletedVolumes {

4
weed/server/volume_grpc_client_to_master.go

@ -99,7 +99,7 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA
select { select {
case volumeMessage := <-vs.store.NewVolumesChan: case volumeMessage := <-vs.store.NewVolumesChan:
deltaBeat := &master_pb.Heartbeat{ deltaBeat := &master_pb.Heartbeat{
NewVolumes:[]*master_pb.VolumeShortInformationMessage{
NewVolumes: []*master_pb.VolumeShortInformationMessage{
&volumeMessage, &volumeMessage,
}, },
} }
@ -110,7 +110,7 @@ func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcA
} }
case volumeMessage := <-vs.store.DeletedVolumesChan: case volumeMessage := <-vs.store.DeletedVolumesChan:
deltaBeat := &master_pb.Heartbeat{ deltaBeat := &master_pb.Heartbeat{
DeletedVolumes:[]*master_pb.VolumeShortInformationMessage{
DeletedVolumes: []*master_pb.VolumeShortInformationMessage{
&volumeMessage, &volumeMessage,
}, },
} }

1
weed/shell/command_volume_unmount.go

@ -50,7 +50,6 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *commandEnv, writer
} }
func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) {
return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{

1
weed/storage/needle/needle.go

@ -188,4 +188,3 @@ func ParseNeedleIdCookie(key_hash_string string) (NeedleId, Cookie, error) {
func (n *Needle) LastModifiedString() string { func (n *Needle) LastModifiedString() string {
return time.Unix(int64(n.LastModified), 0).Format("2006-01-02T15:04:05") return time.Unix(int64(n.LastModified), 0).Format("2006-01-02T15:04:05")
} }

3
weed/storage/needle/needle_read_write.go

@ -185,7 +185,7 @@ func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version
case Version2, Version3: case Version2, Version3:
err = n.readNeedleDataVersion2(bytes[NeedleHeaderSize : NeedleHeaderSize+int(n.Size)]) err = n.readNeedleDataVersion2(bytes[NeedleHeaderSize : NeedleHeaderSize+int(n.Size)])
} }
if err != nil && err != io.EOF{
if err != nil && err != io.EOF {
return err return err
} }
if size > 0 { if size > 0 {
@ -390,4 +390,3 @@ func (n *Needle) SetHasPairs() {
func getActualSize(size uint32, version Version) int64 { func getActualSize(size uint32, version Version) int64 {
return NeedleHeaderSize + NeedleBodyLength(size, version) return NeedleHeaderSize + NeedleBodyLength(size, version)
} }

6
weed/storage/needle/volume_id_test.go

@ -3,11 +3,11 @@ package needle
import "testing" import "testing"
func TestNewVolumeId(t *testing.T) { func TestNewVolumeId(t *testing.T) {
if _,err := NewVolumeId("1"); err != nil {
if _, err := NewVolumeId("1"); err != nil {
t.Error(err) t.Error(err)
} }
if _, err := NewVolumeId("a");err != nil {
if _, err := NewVolumeId("a"); err != nil {
t.Logf("a is not legal volume id, %v", err) t.Logf("a is not legal volume id, %v", err)
} }
} }
@ -42,4 +42,4 @@ func TestVolumeId_Next(t *testing.T) {
if new := pvid.Next(); new != 12 { if new := pvid.Next(); new != 12 {
t.Errorf("get next volume id failed") t.Errorf("get next volume id failed")
} }
}
}

4
weed/storage/volume_checking.go

@ -15,7 +15,7 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uin
return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e) return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e)
} }
if indexSize == 0 { if indexSize == 0 {
return 0,nil
return 0, nil
} }
var lastIdxEntry []byte var lastIdxEntry []byte
if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil { if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleMapEntrySize); e != nil {
@ -23,7 +23,7 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uin
} }
key, offset, size := IdxFileEntry(lastIdxEntry) key, offset, size := IdxFileEntry(lastIdxEntry)
if offset.IsZero() { if offset.IsZero() {
return 0,nil
return 0, nil
} }
if size == TombstoneFileSize { if size == TombstoneFileSize {
size = 0 size = 0

6
weed/storage/volume_info.go

@ -45,9 +45,9 @@ func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err er
func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi VolumeInfo, err error) { func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi VolumeInfo, err error) {
vi = VolumeInfo{ vi = VolumeInfo{
Id: needle.VolumeId(m.Id),
Collection: m.Collection,
Version: needle.Version(m.Version),
Id: needle.VolumeId(m.Id),
Collection: m.Collection,
Version: needle.Version(m.Version),
} }
rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
if e != nil { if e != nil {

4
weed/topology/node.go

@ -46,8 +46,8 @@ type NodeImpl struct {
maxVolumeCount int64 maxVolumeCount int64
parent Node parent Node
sync.RWMutex // lock children sync.RWMutex // lock children
children map[NodeId]Node
maxVolumeId needle.VolumeId
children map[NodeId]Node
maxVolumeId needle.VolumeId
//for rack, data center, topology //for rack, data center, topology
nodeType string nodeType string

4
weed/topology/topology_test.go

@ -114,10 +114,10 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
nil, nil,
dn) dn)
for vid, _ := range layout.vid2location{
for vid, _ := range layout.vid2location {
println("after add volume id", vid) println("after add volume id", vid)
} }
for _, vid := range layout.writables{
for _, vid := range layout.writables {
println("after add writable volume id", vid) println("after add writable volume id", vid)
} }

1
weed/util/compression.go

@ -91,4 +91,3 @@ func UnGzipData(input []byte) ([]byte, error) {
return false, false return false, false
} }
Loading…
Cancel
Save