From ed848425c77ccca0a9d2f30c7f631bc50f28cd32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?= Date: Thu, 29 Sep 2016 13:57:23 +0800 Subject: [PATCH 01/61] supplemental data between compacting and commit compacting --- weed/storage/volume.go | 8 +++ weed/storage/volume_vacuum.go | 110 +++++++++++++++++++++++++++++++--- 2 files changed, 110 insertions(+), 8 deletions(-) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 801dfe267..258787701 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -10,6 +10,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" ) +type keyField struct { + offset uint32 + size uint32 +} + type Volume struct { Id VolumeId dir string @@ -23,6 +28,9 @@ type Volume struct { dataFileAccessLock sync.Mutex lastModifiedTime uint64 //unix time in seconds + + lastCompactingIndexOffset uint64 + incrementedHasUpdatedIndexEntry map[uint64]keyField } func NewVolume(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL) (v *Volume, e error) { diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 51d74e311..55c248894 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -6,6 +6,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) func (v *Volume) garbageLevel() float64 { @@ -20,7 +21,8 @@ func (v *Volume) Compact() error { //glog.V(3).Infof("Got Compaction lock...") filePath := v.FileName() - glog.V(3).Infof("creating copies for volume %d ...", v.Id) + v.lastCompactingIndexOffset = v.nm.IndexFileSize() + glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactingIndexOffset) return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx") } @@ -38,14 +40,28 @@ func (v *Volume) commitCompact() error { glog.V(3).Infof("Got Committing lock...") v.nm.Close() _ = v.dataFile.Close() - makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx") + var e error - if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { - return e - } - if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { - return e + if e = v.makeupDiff(v.FileName()+".cpd", v.FileName()+".cpx", v.FileName()+".dat", v.FileName()+".idx"); e != nil { + glog.V(0).Infof("makeupDiff in commitCompact failed %v", e) + e = os.Remove(v.FileName() + ".cpd") + if e != nil { + return e + } + e = os.Remove(v.FileName() + ".cpx") + if e != nil { + return e + } + } else { + var e error + if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { + return e + } + if e = os.Rename(v.FileName()+".cpx", v.FileName()+".idx"); e != nil { + return e + } } + //glog.V(3).Infof("Pretending to be vacuuming...") //time.Sleep(20 * time.Second) glog.V(3).Infof("Loading Commit file...") @@ -55,7 +71,85 @@ func (v *Volume) commitCompact() error { return nil } -func makeupDiff(newDatFile, newIdxFile, oldDatFile, oldIdxFile string) (err error) { +func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) { + var indexSize int64 + + oldIdxFile, err := os.Open(oldIdxFileName) + defer oldIdxFile.Close() + + oldDatFile, err := os.Open(oldDatFileName) + defer oldDatFile.Close() + + if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil { + return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err) + } + if indexSize == 0 || 
uint64(indexSize) <= v.lastCompactingIndexOffset { + return nil + } + + v.incrementedHasUpdatedIndexEntry = make(map[uint64]keyField) + for idx_offset := indexSize; uint64(idx_offset) >= v.lastCompactingIndexOffset; idx_offset -= NeedleIndexSize { + var IdxEntry []byte + if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idx_offset); err != nil { + return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idx_offset, err) + } + key, offset, size := idxFileEntry(IdxEntry) + if _, found := v.incrementedHasUpdatedIndexEntry[key]; !found { + v.incrementedHasUpdatedIndexEntry[key] = keyField{ + offset: offset, + size: size, + } + } else { + continue + } + } + + if len(v.incrementedHasUpdatedIndexEntry) > 0 { + var ( + dst, idx *os.File + ) + if dst, err = os.OpenFile(newDatFileName, os.O_WRONLY, 0644); err != nil { + return + } + defer dst.Close() + + if idx, err = os.OpenFile(newIdxFileName, os.O_WRONLY, 0644); err != nil { + return + } + defer idx.Close() + + idx_entry_bytes := make([]byte, 16) + for key, incre_idx_entry := range v.incrementedHasUpdatedIndexEntry { + util.Uint64toBytes(idx_entry_bytes[0:8], key) + util.Uint32toBytes(idx_entry_bytes[8:12], incre_idx_entry.offset) + util.Uint32toBytes(idx_entry_bytes[12:16], incre_idx_entry.size) + + if _, err := idx.Seek(0, 2); err != nil { + return fmt.Errorf("cannot seek end of indexfile %s: %v", + newIdxFileName, err) + } + _, err = idx.Write(idx_entry_bytes) + + //even the needle cache in memory is hit, the need_bytes is correct + needle_bytes, _, _ := ReadNeedleBlob(dst, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size) + + var offset int64 + if offset, err = dst.Seek(0, 2); err != nil { + glog.V(0).Infof("failed to seek the end of file: %v", err) + return + } + //ensure file writing starting from aligned positions + if offset%NeedlePaddingSize != 0 { + offset = offset + (NeedlePaddingSize - offset%NeedlePaddingSize) + if offset, err = v.dataFile.Seek(offset, 0); err != nil { + glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err) + return + } + } + dst.Write(needle_bytes) + } + } + return nil } From ce1f7ab66250cf3ec82e85c458ec273170e7531a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?= Date: Fri, 7 Oct 2016 16:22:24 +0800 Subject: [PATCH 02/61] makediff func with UT case --- weed/storage/volume.go | 9 +-- weed/storage/volume_checking.go | 1 - weed/storage/volume_vacuum.go | 100 ++++++++++++++++++++++------- weed/storage/volume_vacuum_test.go | 53 +++++++++++++++ 4 files changed, 132 insertions(+), 31 deletions(-) create mode 100644 weed/storage/volume_vacuum_test.go diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 258787701..c1d531376 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -10,11 +10,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" ) -type keyField struct { - offset uint32 - size uint32 -} - type Volume struct { Id VolumeId dir string @@ -29,8 +24,8 @@ type Volume struct { dataFileAccessLock sync.Mutex lastModifiedTime uint64 //unix time in seconds - lastCompactingIndexOffset uint64 - incrementedHasUpdatedIndexEntry map[uint64]keyField + lastCompactIndexOffset uint64 + lastCompactRevision uint16 } func NewVolume(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL) (v *Volume, e error) { diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index d424010f1..48f707594 100644 --- 
a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -21,7 +21,6 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error { return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e) } key, offset, size := idxFileEntry(lastIdxEntry) - //deleted index entry could not point to deleted needle if offset == 0 { return nil } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 55c248894..723300557 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -21,8 +21,9 @@ func (v *Volume) Compact() error { //glog.V(3).Infof("Got Compaction lock...") filePath := v.FileName() - v.lastCompactingIndexOffset = v.nm.IndexFileSize() - glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactingIndexOffset) + v.lastCompactIndexOffset = v.nm.IndexFileSize() + v.lastCompactRevision = v.SuperBlock.CompactRevision + glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx") } @@ -71,6 +72,21 @@ func (v *Volume) commitCompact() error { return nil } +func fetchCompactRevisionFromDatFile(file *os.File) (compactRevision uint16, err error) { + if _, err = file.Seek(0, 0); err != nil { + return 0, fmt.Errorf("cannot seek to the beginning of %s: %v", file.Name(), err) + } + header := make([]byte, SuperBlockSize) + if _, e := file.Read(header); e != nil { + return 0, fmt.Errorf("cannot read file %s 's super block: %v", file.Name(), e) + } + superBlock, err := ParseSuperBlock(header) + if err != nil { + return 0, err + } + return superBlock.CompactRevision, nil +} + func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldIdxFileName string) (err error) { var indexSize int64 @@ -83,56 +99,67 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI if indexSize, err = verifyIndexFileIntegrity(oldIdxFile); err != nil { return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", oldIdxFileName, err) } - if indexSize == 0 || uint64(indexSize) <= v.lastCompactingIndexOffset { + if indexSize == 0 || uint64(indexSize) <= v.lastCompactIndexOffset { return nil } - v.incrementedHasUpdatedIndexEntry = make(map[uint64]keyField) - for idx_offset := indexSize; uint64(idx_offset) >= v.lastCompactingIndexOffset; idx_offset -= NeedleIndexSize { + oldDatCompactRevision, err := fetchCompactRevisionFromDatFile(oldDatFile) + if err != nil { + return + } + if oldDatCompactRevision != v.lastCompactRevision { + return fmt.Errorf("current old dat file's compact revision %d is not the expected one %d", oldDatCompactRevision, v.lastCompactRevision) + } + + type keyField struct { + offset uint32 + size uint32 + } + incrementedHasUpdatedIndexEntry := make(map[uint64]keyField) + + for idx_offset := indexSize - NeedleIndexSize; uint64(idx_offset) >= v.lastCompactIndexOffset; idx_offset -= NeedleIndexSize { var IdxEntry []byte if IdxEntry, err = readIndexEntryAtOffset(oldIdxFile, idx_offset); err != nil { return fmt.Errorf("readIndexEntry %s at offset %d failed: %v", oldIdxFileName, idx_offset, err) } key, offset, size := idxFileEntry(IdxEntry) - if _, found := v.incrementedHasUpdatedIndexEntry[key]; !found { - v.incrementedHasUpdatedIndexEntry[key] = keyField{ + if _, found := incrementedHasUpdatedIndexEntry[key]; !found { + incrementedHasUpdatedIndexEntry[key] = keyField{ offset: offset, size: size, } - } else { - continue } } - if 
len(v.incrementedHasUpdatedIndexEntry) > 0 { + if len(incrementedHasUpdatedIndexEntry) > 0 { var ( dst, idx *os.File ) - if dst, err = os.OpenFile(newDatFileName, os.O_WRONLY, 0644); err != nil { + if dst, err = os.OpenFile(newDatFileName, os.O_RDWR, 0644); err != nil { return } defer dst.Close() - if idx, err = os.OpenFile(newIdxFileName, os.O_WRONLY, 0644); err != nil { + if idx, err = os.OpenFile(newIdxFileName, os.O_RDWR, 0644); err != nil { return } defer idx.Close() + var newDatCompactRevision uint16 + newDatCompactRevision, err = fetchCompactRevisionFromDatFile(dst) + if err != nil { + return + } + if oldDatCompactRevision+1 != newDatCompactRevision { + return fmt.Errorf("oldDatFile %s 's compact revision is %d while newDatFile %s 's compact revision is %d", oldDatFileName, oldDatCompactRevision, newDatFileName, newDatCompactRevision) + } + idx_entry_bytes := make([]byte, 16) - for key, incre_idx_entry := range v.incrementedHasUpdatedIndexEntry { + for key, incre_idx_entry := range incrementedHasUpdatedIndexEntry { util.Uint64toBytes(idx_entry_bytes[0:8], key) util.Uint32toBytes(idx_entry_bytes[8:12], incre_idx_entry.offset) util.Uint32toBytes(idx_entry_bytes[12:16], incre_idx_entry.size) - if _, err := idx.Seek(0, 2); err != nil { - return fmt.Errorf("cannot seek end of indexfile %s: %v", - newIdxFileName, err) - } - _, err = idx.Write(idx_entry_bytes) - - //even the needle cache in memory is hit, the need_bytes is correct - needle_bytes, _, _ := ReadNeedleBlob(dst, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size) - var offset int64 if offset, err = dst.Seek(0, 2); err != nil { glog.V(0).Infof("failed to seek the end of file: %v", err) @@ -146,7 +173,34 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI return } } - dst.Write(needle_bytes) + + //updated needle + if incre_idx_entry.offset != 0 && incre_idx_entry.size != 0 { + //even the needle cache in memory is hit, the need_bytes is correct + var needle_bytes []byte + needle_bytes, _, err = ReadNeedleBlob(oldDatFile, int64(incre_idx_entry.offset)*NeedlePaddingSize, incre_idx_entry.size) + if err != nil { + return + } + dst.Write(needle_bytes) + util.Uint32toBytes(idx_entry_bytes[8:12], uint32(offset/NeedlePaddingSize)) + } else { //deleted needle + //fakeDelNeedle 's default Data field is nil + fakeDelNeedle := new(Needle) + fakeDelNeedle.Id = key + fakeDelNeedle.Cookie = 0x12345678 + _, err = fakeDelNeedle.Append(dst, v.Version()) + if err != nil { + return + } + util.Uint32toBytes(idx_entry_bytes[8:12], uint32(0)) + } + + if _, err := idx.Seek(0, 2); err != nil { + return fmt.Errorf("cannot seek end of indexfile %s: %v", + newIdxFileName, err) + } + _, err = idx.Write(idx_entry_bytes) } } diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go new file mode 100644 index 000000000..02d1a2b56 --- /dev/null +++ b/weed/storage/volume_vacuum_test.go @@ -0,0 +1,53 @@ +package storage + +import ( + "testing" +) + +/* +makediff test steps +1. launch weed server at your local/dev environment, (option +"garbageThreshold" for master and option "max" for volume should be set with specific value which would let +preparing test prerequisite easier ) + a) ./weed master -garbageThreshold=0.99 -mdir=./m + b) ./weed volume -dir=./data -max=1 -mserver=localhost:9333 -port=8080 +2. 
upload 4 different files; you can call /dir/assign to get 4 different fids
+   a) upload file A with fid a
+   b) upload file B with fid b
+   c) upload file C with fid c
+   d) upload file D with fid d
+3. update files A and C
+   a) modify file A and upload file A with fid a
+   b) modify file C and upload file C with fid c
+   c) record the current 1.idx file size (the lastCompactIndexOffset value)
+4. compact the data file
+   a) run curl http://localhost:8080/admin/vacuum/compact?volumeId=1
+   b) verify that 1.cpd and 1.cpx are created under the volume directory
+5. update file B and delete file D
+   a) modify file B and upload file B with fid b
+   b) delete file D with fid d
+6. now you can run the following UT case; it should pass
+7. commit the compaction manually
+   a) mv 1.cpd 1.dat
+   b) mv 1.cpx 1.idx
+8. restart the volume server
+9. Now you should get updated file A,B,C
+*/
+
+func TestMakeDiff(t *testing.T) {
+
+	v := new(Volume)
+	//lastCompactIndexOffset value is the index file size before step 4
+	v.lastCompactIndexOffset = 96
+	v.SuperBlock.version = 0x2
+	err := v.makeupDiff(
+		"/yourpath/1.cpd",
+		"/yourpath/1.cpx",
+		"/yourpath/1.dat",
+		"/yourpath/1.idx")
+	if err != nil {
+		t.Errorf("makeupDiff err is %v", err)
+	} else {
+		t.Log("makeupDiff Succeeded")
+	}
+}
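
[editor's note: The makeupDiff test above exercises the incremental replay introduced in PATCH 01 and refined in PATCH 02: every .idx record appended after compaction started is re-applied to the freshly compacted .cpd/.cpx pair. Each record is a fixed 16-byte entry: an 8-byte needle key, a 4-byte offset counted in NeedlePaddingSize units, and a 4-byte size, with offset 0 marking a deletion. The standalone Go sketch below only illustrates that layout; encodeEntry and decodeEntry are hypothetical stand-ins for the project's util.Uint64toBytes/Uint32toBytes and idxFileEntry helpers, and the big-endian byte order plus the padding size of 8 are assumptions based on weed/util, not verified here.]

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    const needlePaddingSize = 8 // assumed alignment unit, mirrors NeedlePaddingSize

    // indexEntry mirrors one 16-byte .idx record as used by makeupDiff.
    type indexEntry struct {
        key    uint64 // needle id
        offset uint32 // data-file offset / needlePaddingSize; 0 means deleted
        size   uint32 // needle size in bytes
    }

    // encodeEntry packs an entry the way the util.Uint64toBytes/Uint32toBytes
    // calls in PATCH 02 do (big-endian assumed).
    func encodeEntry(e indexEntry) []byte {
        b := make([]byte, 16)
        binary.BigEndian.PutUint64(b[0:8], e.key)
        binary.BigEndian.PutUint32(b[8:12], e.offset)
        binary.BigEndian.PutUint32(b[12:16], e.size)
        return b
    }

    // decodeEntry is the inverse, mirroring what idxFileEntry returns.
    func decodeEntry(b []byte) indexEntry {
        return indexEntry{
            key:    binary.BigEndian.Uint64(b[0:8]),
            offset: binary.BigEndian.Uint32(b[8:12]),
            size:   binary.BigEndian.Uint32(b[12:16]),
        }
    }

    func main() {
        e := indexEntry{key: 42, offset: 96 / needlePaddingSize, size: 1024}
        fmt.Printf("%+v\n", decodeEntry(encodeEntry(e))) // {key:42 offset:12 size:1024}
    }
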
From 7d382ba5fec2539821047e81b4f9a8ce20af0331 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?=
Date: Fri, 7 Oct 2016 16:34:22 +0800
Subject: [PATCH 03/61] comment out UT case

---
 weed/storage/volume_vacuum_test.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 02d1a2b56..8ab59404d 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -34,6 +34,7 @@ preparing test prerequisite easier )
 9. Now you should get updated file A,B,C
 */
 
+/*
 func TestMakeDiff(t *testing.T) {
 
 	v := new(Volume)
@@ -51,3 +52,4 @@ func TestMakeDiff(t *testing.T) {
 		t.Log("makeupDiff Succeeded")
 	}
 }
+*/

From 7d73bbb07399cc504ae2ebdcfdc164dc01295916 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?=
Date: Fri, 7 Oct 2016 16:40:51 +0800
Subject: [PATCH 04/61] comment out UT case

---
 weed/storage/volume_vacuum_test.go | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go
index 8ab59404d..c2fac6ce8 100644
--- a/weed/storage/volume_vacuum_test.go
+++ b/weed/storage/volume_vacuum_test.go
@@ -34,22 +34,22 @@ preparing test prerequisite easier )
 9. Now you should get updated file A,B,C
 */
 
-/*
 func TestMakeDiff(t *testing.T) {
 
 	v := new(Volume)
 	//lastCompactIndexOffset value is the index file size before step 4
 	v.lastCompactIndexOffset = 96
 	v.SuperBlock.version = 0x2
-	err := v.makeupDiff(
-		"/yourpath/1.cpd",
-		"/yourpath/1.cpx",
-		"/yourpath/1.dat",
-		"/yourpath/1.idx")
-	if err != nil {
-		t.Errorf("makeupDiff err is %v", err)
-	} else {
-		t.Log("makeupDiff Succeeded")
-	}
+	/*
+		err := v.makeupDiff(
+			"/yourpath/1.cpd",
+			"/yourpath/1.cpx",
+			"/yourpath/1.dat",
+			"/yourpath/1.idx")
+		if err != nil {
+			t.Errorf("makeupDiff err is %v", err)
+		} else {
+			t.Log("makeupDiff Succeeded")
+		}
+	*/
 }
-*/

From 48a24559a3c46efaa99ca6f3d43715e545381441 Mon Sep 17 00:00:00 2001
From: Amin Cheloh
Date: Wed, 12 Oct 2016 16:47:56 +0700
Subject: [PATCH 05/61] Update entrypoint.sh

Run the application as the container's PID 1 so that it receives Unix
signals

---
 docker/entrypoint.sh | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh
index bdde2caa4..34ab61148 100755
--- a/docker/entrypoint.sh
+++ b/docker/entrypoint.sh
@@ -8,7 +8,7 @@ case "$1" in
 	if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
 		ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
 	fi
-	/usr/bin/weed $@ $ARGS
+	exec /usr/bin/weed $@ $ARGS
 	;;
 
   'volume')
@@ -17,7 +17,7 @@ case "$1" in
 	if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
 		ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
 	fi
-	/usr/bin/weed $@ $ARGS
+	exec /usr/bin/weed $@ $ARGS
 	;;
 
   'server')
@@ -25,10 +25,10 @@ case "$1" in
 	if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then
 		ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT"
 	fi
-	/usr/bin/weed $@ $ARGS
+	exec /usr/bin/weed $@ $ARGS
 	;;
 
   *)
-	/usr/bin/weed $@
+	exec /usr/bin/weed $@
 	;;
-esac
+esac

From 54bd1c406a70fab3f71332511897460fed32e568 Mon Sep 17 00:00:00 2001
From: Amin Cheloh
Date: Wed, 12 Oct 2016 17:10:38 +0700
Subject: [PATCH 06/61] Update Dockerfile

Move COPY /entrypoint.sh to the bottom and make sure entrypoint.sh has
execute permission

---
 docker/Dockerfile | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index ed1c4dfe3..21e5a7b47 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,8 +1,5 @@
 FROM progrium/busybox
 
-COPY entrypoint.sh /entrypoint.sh
-COPY Dockerfile /etc/Dockerfile
-
 RUN opkg-install curl
 RUN echo tlsv1 >> ~/.curlrc
 
@@ -15,4 +12,7 @@ EXPOSE 9333
 
 VOLUME /data
 
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
 ENTRYPOINT ["/entrypoint.sh"]

From 4194ae33fd1e27515a3f38d932c8cab2c276ecf5 Mon Sep 17 00:00:00 2001
From: Yang zhixiang
Date: Thu, 3 Nov 2016 15:46:59 +0800
Subject: [PATCH 07/61] fix raft_server.go isPeersChanged bug

---
 weed/server/raft_server.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go
index a35659818..8b2ad3540 100644
--- a/weed/server/raft_server.go
+++ b/weed/server/raft_server.go
@@ -144,7 +144,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string,
 	sort.Strings(peers)
 	sort.Strings(oldPeers)
 
-	return oldPeers, reflect.DeepEqual(peers, oldPeers)
+	return oldPeers, !reflect.DeepEqual(peers, oldPeers)
 
 }
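
[editor's note: The one-character fix in PATCH 07 is easy to miss: isPeersChanged is asked "did the peer list change?" but previously returned reflect.DeepEqual, i.e. "are the lists identical?", so the answer was inverted. Below is a minimal standalone sketch of the corrected predicate; the simplified signature is hypothetical, since the real function also loads oldPeers from an on-disk conf file.]

    package main

    import (
        "fmt"
        "reflect"
        "sort"
    )

    // peersChanged reports whether two peer lists differ, ignoring order.
    func peersChanged(peers, oldPeers []string) bool {
        sort.Strings(peers)
        sort.Strings(oldPeers)
        // Before the fix this returned reflect.DeepEqual(peers, oldPeers)
        // directly, which is true when nothing changed: the opposite meaning.
        return !reflect.DeepEqual(peers, oldPeers)
    }

    func main() {
        fmt.Println(peersChanged([]string{"a:9333"}, []string{"a:9333"})) // false
        fmt.Println(peersChanged([]string{"a:9333"}, []string{"b:9333"})) // true
    }
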
From df49692dff75577f5eedc4d4251f1224b3f12799 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Fri, 4 Nov 2016 20:42:28 -0700
Subject: [PATCH 08/61] add tool to change replication or ttl

fix https://github.com/chrislusf/seaweedfs/issues/386

---
 .../change_superblock.go} | 43 +++++++++++++------
 1 file changed, 31 insertions(+), 12 deletions(-)
 rename unmaintained/{change_replication/change_replication.go => change_superblock/change_superblock.go} (67%)

diff --git a/unmaintained/change_replication/change_replication.go b/unmaintained/change_superblock/change_superblock.go
similarity index 67%
rename from unmaintained/change_replication/change_replication.go
rename to unmaintained/change_superblock/change_superblock.go
index c32d2d266..6ecead697 100644
--- a/unmaintained/change_replication/change_replication.go
+++ b/unmaintained/change_superblock/change_superblock.go
@@ -16,6 +16,7 @@ var (
 	fixVolumeCollection = flag.String("collection", "", "the volume collection name")
 	fixVolumeId         = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
 	targetReplica       = flag.String("replication", "", "If just empty, only print out current replication setting.")
+	targetTTL           = flag.String("ttl", "", "If just empty, only print out current ttl setting.")
 )
 
 /*
@@ -58,27 +59,45 @@ func main() {
 	}
 
 	fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
+	fmt.Printf("Current Volume TTL: %s\n", superBlock.Ttl.String())
 
-	if *targetReplica == "" {
-		return
-	}
+	hasChange := false
 
-	replica, err := storage.NewReplicaPlacementFromString(*targetReplica)
+	if *targetReplica != "" {
+		replica, err := storage.NewReplicaPlacementFromString(*targetReplica)
 
-	if err != nil {
-		glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+		if err != nil {
+			glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
+		}
+
+		fmt.Printf("Changing replication to: %s\n", replica)
+
+		superBlock.ReplicaPlacement = replica
+		hasChange = true
 	}
 
-	fmt.Printf("Changing to: %s\n", replica)
+	if *targetTTL != "" {
+		ttl, err := storage.ReadTTL(*targetTTL)
 
-	superBlock.ReplicaPlacement = replica
+		if err != nil {
+			glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
+		}
 
-	header = superBlock.Bytes()
+		fmt.Printf("Changing ttl to: %s\n", ttl)
 
-	if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
-		glog.Fatalf("cannot write super block: %v", e)
+		superBlock.Ttl = ttl
+		hasChange = true
 	}
 
-	fmt.Println("Done.")
+	if hasChange {
+
+		header = superBlock.Bytes()
+
+		if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
+			glog.Fatalf("cannot write super block: %v", e)
+		}
+
+		fmt.Println("Change Applied.")
+	}
 }

From 36f96332238a37cb659bb3ff27d82febb1b22c91 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Sun, 6 Nov 2016 20:55:22 -0800
Subject: [PATCH 09/61] add locks for location.volumes

fix https://github.com/chrislusf/seaweedfs/issues/392

---
 weed/storage/disk_location.go | 39 +++++++++++++++++++++++++++++++++++
 weed/storage/store.go         | 14 +++++++------
 2 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index cc3c83b63..fc11a411f 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -3,6 +3,7 @@ package storage
 import (
 	"io/ioutil"
 	"strings"
+	"sync"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 )
 
 type DiskLocation struct {
 	Directory      string
 	MaxVolumeCount int
 	volumes        map[VolumeId]*Volume
+	sync.RWMutex
 }
 
 func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation {
@@ -20,6 +22,8 @@ func (l *DiskLocation) 
loadExistingVolumes(needleMapKind NeedleMapType) { + l.Lock() + defer l.Unlock() if dirs, err := ioutil.ReadDir(l.Directory); err == nil { for _, dir := range dirs { @@ -48,6 +52,9 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { } func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) { + l.Lock() + defer l.Unlock() + for k, v := range l.volumes { if v.Collection == collection { e = l.deleteVolumeById(k) @@ -71,3 +78,35 @@ func (l *DiskLocation) deleteVolumeById(vid VolumeId) (e error) { delete(l.volumes, vid) return } + +func (l *DiskLocation) SetVolume(vid VolumeId, volume *Volume) { + l.Lock() + defer l.Unlock() + + l.volumes[vid] = volume +} + +func (l *DiskLocation) FindVolume(vid VolumeId) (*Volume, bool) { + l.RLock() + defer l.RUnlock() + + v, ok := l.volumes[vid] + return v, ok +} + +func (l *DiskLocation) VolumesLen() int { + l.RLock() + defer l.RUnlock() + + return len(l.volumes) +} + +func (l *DiskLocation) Close() { + l.Lock() + defer l.Unlock() + + for _, v := range l.volumes { + v.Close() + } + return +} diff --git a/weed/storage/store.go b/weed/storage/store.go index d6c7172e7..614c87ace 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -143,7 +143,7 @@ func (s *Store) DeleteCollection(collection string) (e error) { func (s *Store) findVolume(vid VolumeId) *Volume { for _, location := range s.Locations { - if v, found := location.volumes[vid]; found { + if v, found := location.FindVolume(vid); found { return v } } @@ -152,7 +152,7 @@ func (s *Store) findVolume(vid VolumeId) *Volume { func (s *Store) findFreeLocation() (ret *DiskLocation) { max := 0 for _, location := range s.Locations { - currentFreeCount := location.MaxVolumeCount - len(location.volumes) + currentFreeCount := location.MaxVolumeCount - location.VolumesLen() if currentFreeCount > max { max = currentFreeCount ret = location @@ -168,7 +168,7 @@ func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleM glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl); err == nil { - location.volumes[vid] = volume + location.SetVolume(vid, volume) return nil } else { return err @@ -180,6 +180,7 @@ func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleM func (s *Store) Status() []*VolumeInfo { var stats []*VolumeInfo for _, location := range s.Locations { + location.RLock() for k, v := range location.volumes { s := &VolumeInfo{ Id: VolumeId(k), @@ -194,6 +195,7 @@ func (s *Store) Status() []*VolumeInfo { Ttl: v.Ttl} stats = append(stats, s) } + location.RUnlock() } sortVolumeInfos(stats) return stats @@ -219,6 +221,7 @@ func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.S var maxFileKey uint64 for _, location := range s.Locations { maxVolumeCount = maxVolumeCount + location.MaxVolumeCount + location.Lock() for k, v := range location.volumes { if maxFileKey < v.nm.MaxFileKey() { maxFileKey = v.nm.MaxFileKey() @@ -246,6 +249,7 @@ func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.S } } } + location.Unlock() } joinMessage := &operation.JoinMessage{ @@ -290,9 +294,7 @@ func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.S } func (s *Store) Close() { for _, location := range s.Locations { - for _, v := range location.volumes { - 
v.Close() - } + location.Close() } } func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) { From 57005ed7f8cb9746e791ccde27fa8525c501c20f Mon Sep 17 00:00:00 2001 From: Yang zhixiang Date: Tue, 8 Nov 2016 11:00:46 +0800 Subject: [PATCH 10/61] fix bug : join post is empty --- weed/server/raft_server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 8b2ad3540..3652d7add 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -165,7 +165,7 @@ func (s *RaftServer) Join(peers []string) error { target := fmt.Sprintf("http://%s/cluster/join", strings.TrimSpace(m)) glog.V(0).Infoln("Attempting to connect to:", target) - err = postFollowingOneRedirect(target, "application/json", &b) + err = postFollowingOneRedirect(target, "application/json", b) if err != nil { glog.V(0).Infoln("Post returned error: ", err.Error()) @@ -182,9 +182,9 @@ func (s *RaftServer) Join(peers []string) error { } // a workaround because http POST following redirection misses request body -func postFollowingOneRedirect(target string, contentType string, b *bytes.Buffer) error { +func postFollowingOneRedirect(target string, contentType string, b bytes.Buffer) error { backupReader := bytes.NewReader(b.Bytes()) - resp, err := http.Post(target, contentType, b) + resp, err := http.Post(target, contentType, &b) if err != nil { return err } From 096ffa9744bbca5d9bbed413b6c54ed5da05df39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?= Date: Fri, 11 Nov 2016 11:53:22 +0800 Subject: [PATCH 11/61] concurrent loading volume --- weed/storage/disk_location.go | 61 ++++++++++++++++++++++--------- weed/storage/needle_map_memory.go | 2 +- 2 files changed, 44 insertions(+), 19 deletions(-) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index fc11a411f..d91825c24 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -2,6 +2,7 @@ package storage import ( "io/ioutil" + "os" "strings" "sync" @@ -25,29 +26,53 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { l.Lock() defer l.Unlock() - if dirs, err := ioutil.ReadDir(l.Directory); err == nil { - for _, dir := range dirs { - name := dir.Name() - if !dir.IsDir() && strings.HasSuffix(name, ".dat") { - collection := "" - base := name[:len(name)-len(".dat")] - i := strings.LastIndex(base, "_") - if i > 0 { - collection, base = base[0:i], base[i+1:] - } - if vid, err := NewVolumeId(base); err == nil { - if l.volumes[vid] == nil { - if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil { - l.volumes[vid] = v - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) - } else { - glog.V(0).Infof("new volume %s error %s", name, e) + task_queue := make(chan os.FileInfo, 100) + go func() { + if dirs, err := ioutil.ReadDir(l.Directory); err == nil { + for _, dir := range dirs { + task_queue <- dir + } + } + close(task_queue) + }() + + const concurrency int = 10 + var wg sync.WaitGroup + var mutex sync.RWMutex + for workerNum := 0; workerNum < concurrency; workerNum++ { + wg.Add(1) + go func() { + defer wg.Done() + for dir := range task_queue { + name := dir.Name() + if !dir.IsDir() && strings.HasSuffix(name, ".dat") { + collection := "" + base := name[:len(name)-len(".dat")] + i := strings.LastIndex(base, "_") + if i > 0 { + collection, base = base[0:i], 
base[i+1:] + } + if vid, err := NewVolumeId(base); err == nil { + mutex.RLock() + _, found := l.volumes[vid] + mutex.RUnlock() + if !found { + if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil { + mutex.Lock() + l.volumes[vid] = v + mutex.Unlock() + glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) + } else { + glog.V(0).Infof("new volume %s error %s", name, e) + } } } } } - } + }() } + wg.Wait() + glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount) } diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index f2f4835df..195d8bdbc 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -48,7 +48,7 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) { } return nil }) - glog.V(1).Infoln("max file key:", nm.MaximumFileKey) + glog.V(1).Infof("max file key: %d for file: %s", nm.MaximumFileKey, file.Name()) return nm, e } From b9f385bd666d256d8b3be130b10e1069c9f231b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9C=8D=E6=99=93=E6=A0=8B?= Date: Sun, 13 Nov 2016 13:24:52 +0800 Subject: [PATCH 12/61] refactor concurrent loading code logic --- weed/storage/disk_location.go | 74 +++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 29 deletions(-) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index d91825c24..e7604a734 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -22,11 +22,43 @@ func NewDiskLocation(dir string, maxVolumeCount int) *DiskLocation { return location } -func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { - l.Lock() - defer l.Unlock() +func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleMapType, mutex *sync.RWMutex) { + name := dir.Name() + if !dir.IsDir() && strings.HasSuffix(name, ".dat") { + collection := "" + base := name[:len(name)-len(".dat")] + i := strings.LastIndex(base, "_") + if i > 0 { + collection, base = base[0:i], base[i+1:] + } + if vid, err := NewVolumeId(base); err == nil { + mutex.RLock() + _, found := l.volumes[vid] + mutex.RUnlock() + if !found { + if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil { + mutex.Lock() + l.volumes[vid] = v + mutex.Unlock() + glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) + } else { + glog.V(0).Infof("new volume %s error %s", name, e) + } + } + } + } +} - task_queue := make(chan os.FileInfo, 100) +func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrentFlag bool) { + var concurrency int + if concurrentFlag { + //You could choose a better optimized concurency value after testing at your environment + concurrency = 10 + } else { + concurrency = 1 + } + + task_queue := make(chan os.FileInfo, 10*concurrency) go func() { if dirs, err := ioutil.ReadDir(l.Directory); err == nil { for _, dir := range dirs { @@ -36,7 +68,6 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { close(task_queue) }() - const concurrency int = 10 var wg sync.WaitGroup var mutex sync.RWMutex for workerNum := 0; workerNum < concurrency; workerNum++ { @@ -44,35 +75,20 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { go func() { defer wg.Done() for dir := 
range task_queue { - name := dir.Name() - if !dir.IsDir() && strings.HasSuffix(name, ".dat") { - collection := "" - base := name[:len(name)-len(".dat")] - i := strings.LastIndex(base, "_") - if i > 0 { - collection, base = base[0:i], base[i+1:] - } - if vid, err := NewVolumeId(base); err == nil { - mutex.RLock() - _, found := l.volumes[vid] - mutex.RUnlock() - if !found { - if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil { - mutex.Lock() - l.volumes[vid] = v - mutex.Unlock() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) - } else { - glog.V(0).Infof("new volume %s error %s", name, e) - } - } - } - } + l.loadExistingVolume(dir, needleMapKind, &mutex) } }() } wg.Wait() +} + +func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapType) { + l.Lock() + defer l.Unlock() + + l.concurrentLoadingVolumes(needleMapKind, true) + glog.V(0).Infoln("Store started on dir:", l.Directory, "with", len(l.volumes), "volumes", "max", l.MaxVolumeCount) } From f54f530ada119f9bfe45d087c5474a2c70e2b9a1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 13 Nov 2016 14:07:51 -0800 Subject: [PATCH 13/61] adjust verbose logging --- weed/storage/volume.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index c1d531376..dfd623eaa 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -95,9 +95,9 @@ func (v *Volume) expired(volumeSizeLimit uint64) bool { if v.Ttl == nil || v.Ttl.Minutes() == 0 { return false } - glog.V(0).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTime) + glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTime) livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTime)) / 60 - glog.V(0).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) + glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) if int64(v.Ttl.Minutes()) < livedMinutes { return true } From dcaf1796fe0e41e39ac5c9602cb190cf150e7849 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 16 Nov 2016 07:09:57 -0800 Subject: [PATCH 14/61] add option to enable caching --- weed/command/server.go | 2 ++ weed/command/volume.go | 3 +++ weed/server/volume_server.go | 4 +++- weed/storage/needle_byte_cache.go | 21 +++++++++++++-------- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/weed/command/server.go b/weed/command/server.go index eed7dcae4..027ba191d 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -72,6 +72,7 @@ var ( volumeFixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", true, "Adjust jpg orientation when uploading.") volumeReadRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") volumeServerPublicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") + volumeEnableBytesCache = cmdServer.Flag.Bool("volume.cache.enable", false, "direct cache instead of OS cache, cost more memory.") isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") serverWhiteList []string @@ -259,6 +260,7 @@ func runServer(cmd *Command, args []string) bool { volumeNeedleMapKind, *serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack, serverWhiteList, *volumeFixJpgOrientation, *volumeReadRedirect, + *volumeEnableBytesCache, ) glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", 
*serverIp+":"+strconv.Itoa(*volumePort)) diff --git a/weed/command/volume.go b/weed/command/volume.go index 21369cbe9..ba498b8e4 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -36,6 +36,7 @@ type VolumeServerOptions struct { indexType *string fixJpgOrientation *bool readRedirect *bool + enableBytesCache *bool } func init() { @@ -54,6 +55,7 @@ func init() { v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb] mode for memory~performance balance.") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", true, "Adjust jpg orientation when uploading.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") + v.enableBytesCache = cmdVolume.Flag.Bool("cache.enable", false, "direct cache instead of OS cache, cost more memory.") } var cmdVolume = &Command{ @@ -132,6 +134,7 @@ func runVolume(cmd *Command, args []string) bool { *v.master, *v.pulseSeconds, *v.dataCenter, *v.rack, v.whiteList, *v.fixJpgOrientation, *v.readRedirect, + *v.enableBytesCache, ) listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 79a4276b1..1a912a169 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -33,7 +33,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, dataCenter string, rack string, whiteList []string, fixJpgOrientation bool, - readRedirect bool) *VolumeServer { + readRedirect bool, + enableBytesCache bool) *VolumeServer { vs := &VolumeServer{ pulseSeconds: pulseSeconds, dataCenter: dataCenter, @@ -44,6 +45,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, } vs.SetMasterNode(masterNode) vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) + storage.EnableBytesCache = enableBytesCache vs.guard = security.NewGuard(whiteList, "") diff --git a/weed/storage/needle_byte_cache.go b/weed/storage/needle_byte_cache.go index ae35a48ba..dfc32bcbf 100644 --- a/weed/storage/needle_byte_cache.go +++ b/weed/storage/needle_byte_cache.go @@ -11,8 +11,9 @@ import ( ) var ( - bytesCache *lru.Cache - bytesPool *util.BytesPool + EnableBytesCache = true + bytesCache *lru.Cache + bytesPool *util.BytesPool ) /* @@ -48,11 +49,13 @@ func (block *Block) increaseReference() { func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) { // check cache, return if found cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize) - if obj, found := bytesCache.Get(cacheKey); found { - block = obj.(*Block) - block.increaseReference() - dataSlice = block.Bytes[0:readSize] - return dataSlice, block, nil + if EnableBytesCache { + if obj, found := bytesCache.Get(cacheKey); found { + block = obj.(*Block) + block.increaseReference() + dataSlice = block.Bytes[0:readSize] + return dataSlice, block, nil + } } // get the []byte from pool @@ -61,7 +64,9 @@ func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []b block = &Block{Bytes: b, refCount: 2} dataSlice = block.Bytes[0:readSize] _, err = r.ReadAt(dataSlice, offset) - bytesCache.Add(cacheKey, block) + if EnableBytesCache { + bytesCache.Add(cacheKey, block) + } return dataSlice, block, err } From 9ba52db5858c480cb8cf4125b0d698f188f22ba8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 26 Nov 2016 12:14:06 -0800 Subject: [PATCH 15/61] add writable checking fix https://github.com/chrislusf/seaweedfs/issues/405 --- 
weed/topology/topology_vacuum.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 3a6fcbae2..a7ef52336 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -86,7 +86,14 @@ func (t *Topology) Vacuum(garbageThreshold string) int { for _, vl := range c.storageType2VolumeLayout.Items() { if vl != nil { volumeLayout := vl.(*VolumeLayout) + writableSet := make(map[storage.VolumeId]bool) + for _, id := range volumeLayout.writables { + writableSet[id] = true + } for vid, locationlist := range volumeLayout.vid2location { + if _, isWritable := writableSet[vid]; !isWritable { + continue + } glog.V(0).Infof("check vacuum on collection:%s volume:%d", c.Name, vid) if batchVacuumVolumeCheck(volumeLayout, vid, locationlist, garbageThreshold) { if batchVacuumVolumeCompact(volumeLayout, vid, locationlist) { From 5385bf9555f32adefe25457c025b2e895d94ac7c Mon Sep 17 00:00:00 2001 From: eshujiushiwo <378013446@qq.com> Date: Mon, 28 Nov 2016 11:27:40 +0800 Subject: [PATCH 16/61] update cassandra connections update cassandra connections --- weed/filer/cassandra_store/cassandra_store.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go index 50a792a65..e9e1d9222 100644 --- a/weed/filer/cassandra_store/cassandra_store.go +++ b/weed/filer/cassandra_store/cassandra_store.go @@ -32,7 +32,12 @@ type CassandraStore struct { func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err error) { c = &CassandraStore{} - c.cluster = gocql.NewCluster(hosts...) + s := strings.Split(hosts, ",") + if len(s) == 1 { + c.cluster = gocql.NewCluster(hosts...) + } else if len(s) > 1 { + c.cluster = gocql.NewCluster(s[0], s[1]) + } c.cluster.Keyspace = keyspace c.cluster.Consistency = gocql.Quorum c.session, err = c.cluster.CreateSession() From 89ccb6be05a82631377ddbdbe117df51958bbbf1 Mon Sep 17 00:00:00 2001 From: eshujiushiwo <378013446@qq.com> Date: Mon, 28 Nov 2016 12:16:51 +0800 Subject: [PATCH 17/61] Update cassandra_store.go --- weed/filer/cassandra_store/cassandra_store.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go index e9e1d9222..3005fead9 100644 --- a/weed/filer/cassandra_store/cassandra_store.go +++ b/weed/filer/cassandra_store/cassandra_store.go @@ -34,8 +34,11 @@ func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err c = &CassandraStore{} s := strings.Split(hosts, ",") if len(s) == 1 { + fmt.Println("000") c.cluster = gocql.NewCluster(hosts...) 
} else if len(s) > 1 { + fmt.Println("111",s[0]) + fmt.Println("222",s[1]) c.cluster = gocql.NewCluster(s[0], s[1]) } c.cluster.Keyspace = keyspace From 809aa028ec7be5c6f19945d81d3b3456780b2bc2 Mon Sep 17 00:00:00 2001 From: eshujiushiwo <378013446@qq.com> Date: Mon, 28 Nov 2016 15:01:04 +0800 Subject: [PATCH 18/61] fix the connection of cassandra fix the connection of cassandra ,let filer can connect to multi cassandra nodes which are in the one cluster --- weed/filer/cassandra_store/cassandra_store.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go index 3005fead9..0431872fa 100644 --- a/weed/filer/cassandra_store/cassandra_store.go +++ b/weed/filer/cassandra_store/cassandra_store.go @@ -2,7 +2,7 @@ package cassandra_store import ( "fmt" - + "strings" "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" @@ -34,11 +34,8 @@ func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err c = &CassandraStore{} s := strings.Split(hosts, ",") if len(s) == 1 { - fmt.Println("000") c.cluster = gocql.NewCluster(hosts...) } else if len(s) > 1 { - fmt.Println("111",s[0]) - fmt.Println("222",s[1]) c.cluster = gocql.NewCluster(s[0], s[1]) } c.cluster.Keyspace = keyspace From 3e29d0f75b974294718c5050ef5adabcbbab2e51 Mon Sep 17 00:00:00 2001 From: eshujiushiwo <378013446@qq.com> Date: Mon, 28 Nov 2016 15:14:19 +0800 Subject: [PATCH 19/61] Update cassandra_store.go --- weed/filer/cassandra_store/cassandra_store.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go index 0431872fa..f4ff5ef3e 100644 --- a/weed/filer/cassandra_store/cassandra_store.go +++ b/weed/filer/cassandra_store/cassandra_store.go @@ -34,6 +34,7 @@ func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err c = &CassandraStore{} s := strings.Split(hosts, ",") if len(s) == 1 { + glog.V(0).Infof("Only one cassandra node to connect!A Cluster is Proposed" ) c.cluster = gocql.NewCluster(hosts...) } else if len(s) > 1 { c.cluster = gocql.NewCluster(s[0], s[1]) From 3450eff2886f38594154bee4b1b389787c0be575 Mon Sep 17 00:00:00 2001 From: eshujiushiwo <378013446@qq.com> Date: Mon, 28 Nov 2016 15:32:16 +0800 Subject: [PATCH 20/61] fix info log fix info log --- weed/filer/cassandra_store/cassandra_store.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go index f4ff5ef3e..6b7779bb6 100644 --- a/weed/filer/cassandra_store/cassandra_store.go +++ b/weed/filer/cassandra_store/cassandra_store.go @@ -34,7 +34,8 @@ func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err c = &CassandraStore{} s := strings.Split(hosts, ",") if len(s) == 1 { - glog.V(0).Infof("Only one cassandra node to connect!A Cluster is Proposed" ) + + glog.V(2).Info("Only one cassandra node to connect!A Cluster is Proposed!Now using:", string(hosts)) c.cluster = gocql.NewCluster(hosts...) 
} else if len(s) > 1 {
 		c.cluster = gocql.NewCluster(s[0], s[1])
 	}

From 5ee7fdc0cc424b508bec85fccab671285359dd2e Mon Sep 17 00:00:00 2001
From: eshujiushiwo <378013446@qq.com>
Date: Mon, 28 Nov 2016 16:03:39 +0800
Subject: [PATCH 21/61] fix hosts strings

fix hosts strings

---
 weed/filer/cassandra_store/cassandra_store.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go
index 6b7779bb6..35221d7ee 100644
--- a/weed/filer/cassandra_store/cassandra_store.go
+++ b/weed/filer/cassandra_store/cassandra_store.go
@@ -30,13 +30,13 @@ type CassandraStore struct {
 	session *gocql.Session
 }
 
-func NewCassandraStore(keyspace string, hosts ...string) (c *CassandraStore, err error) {
+func NewCassandraStore(keyspace string, hosts string) (c *CassandraStore, err error) {
 	c = &CassandraStore{}
 	s := strings.Split(hosts, ",")
 	if len(s) == 1 {
 
 		glog.V(2).Info("Only one cassandra node to connect!A Cluster is Proposed!Now using:", string(hosts))
-		c.cluster = gocql.NewCluster(hosts...)
+		c.cluster = gocql.NewCluster(hosts)
 	} else if len(s) > 1 {
 		c.cluster = gocql.NewCluster(s[0], s[1])
 	}

From e025fc00a49db8eebb6df8e83a3c890f55ca5c14 Mon Sep 17 00:00:00 2001
From: eshujiushiwo <378013446@qq.com>
Date: Mon, 28 Nov 2016 18:23:39 +0800
Subject: [PATCH 22/61] let filer use all cassandra servers

let filer use all cassandra servers

---
 weed/filer/cassandra_store/cassandra_store.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go
index 35221d7ee..c9ea88735 100644
--- a/weed/filer/cassandra_store/cassandra_store.go
+++ b/weed/filer/cassandra_store/cassandra_store.go
@@ -34,11 +34,10 @@ func NewCassandraStore(keyspace string, hosts string) (c *CassandraStore, err er
 	c = &CassandraStore{}
 	s := strings.Split(hosts, ",")
 	if len(s) == 1 {
-
 		glog.V(2).Info("Only one cassandra node to connect!A Cluster is Proposed!Now using:", string(hosts))
 		c.cluster = gocql.NewCluster(hosts)
 	} else if len(s) > 1 {
-		c.cluster = gocql.NewCluster(s[0], s[1])
+		c.cluster = gocql.NewCluster(s...)
 	}
 	c.cluster.Keyspace = keyspace
 	c.cluster.Consistency = gocql.Quorum
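
[editor's note: Patches 16 through 22 converge on a simple scheme: keep the filer's cassandra setting as one comma-separated string and hand every host to gocql, whose NewCluster is variadic. A single host simply yields a one-element slice, so the len(s) branches above are not strictly necessary. Below is a minimal standalone sketch of the resulting connection logic, using only gocql calls that already appear in these diffs; the keyspace name and host addresses are placeholders.]

    package main

    import (
        "log"
        "strings"

        "github.com/gocql/gocql"
    )

    func newCassandraSession(keyspace, hosts string) (*gocql.Session, error) {
        // "host1,host2,host3" -> ["host1", "host2", "host3"]
        contactPoints := strings.Split(hosts, ",")
        cluster := gocql.NewCluster(contactPoints...)
        cluster.Keyspace = keyspace
        cluster.Consistency = gocql.Quorum
        return cluster.CreateSession()
    }

    func main() {
        session, err := newCassandraSession("seaweed_filer", "10.0.0.1,10.0.0.2")
        if err != nil {
            log.Fatal(err)
        }
        defer session.Close()
    }
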
From 64509dd6caa472b3ce590df0c8e6757907697088 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Mon, 28 Nov 2016 20:07:27 -0800
Subject: [PATCH 23/61] fix text.

---
 weed/filer/cassandra_store/cassandra_store.go | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/weed/filer/cassandra_store/cassandra_store.go b/weed/filer/cassandra_store/cassandra_store.go
index c9ea88735..75af48bcd 100644
--- a/weed/filer/cassandra_store/cassandra_store.go
+++ b/weed/filer/cassandra_store/cassandra_store.go
@@ -3,6 +3,7 @@ package cassandra_store
 
 import (
 	"fmt"
 	"strings"
+
 	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 
@@ -32,13 +33,13 @@ type CassandraStore struct {
 
 func NewCassandraStore(keyspace string, hosts string) (c *CassandraStore, err error) {
 	c = &CassandraStore{}
-	s := strings.Split(hosts, ",")
-	if len(s) == 1 {
-		glog.V(2).Info("Only one cassandra node to connect!A Cluster is Proposed!Now using:", string(hosts))
-		c.cluster = gocql.NewCluster(hosts)
-	} else if len(s) > 1 {
-		c.cluster = gocql.NewCluster(s...)
-	}
+	s := strings.Split(hosts, ",")
+	if len(s) == 1 {
+		glog.V(2).Info("Only one cassandra node to connect! A cluster is Recommended! Now using:", string(hosts))
+		c.cluster = gocql.NewCluster(hosts)
+	} else if len(s) > 1 {
+		c.cluster = gocql.NewCluster(s...)
+	}
 	c.cluster.Keyspace = keyspace
 	c.cluster.Consistency = gocql.Quorum
 	c.session, err = c.cluster.CreateSession()

From 855058858adf18584fe352f5e0b5271dc9585e9f Mon Sep 17 00:00:00 2001
From: Mike Tolman
Date: Wed, 7 Dec 2016 16:51:36 -0700
Subject: [PATCH 24/61] Adding PostgreSQL Filer Support

---
 weed/filer/postgres_store/postgres_store.go | 365 ++++++++++++++++++++
 1 file changed, 365 insertions(+)
 create mode 100644 weed/filer/postgres_store/postgres_store.go

diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go
new file mode 100644
index 000000000..c44486781
--- /dev/null
+++ b/weed/filer/postgres_store/postgres_store.go
@@ -0,0 +1,365 @@
+package postgres_store
+
+import (
+	"database/sql"
+	"fmt"
+	"hash/crc32"
+	"sync"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+
+	_ "github.com/lib/pq"
+)
+
+const (
+	default_maxIdleConnections = 100
+	default_maxOpenConnections = 50
+	default_maxTableNums       = 1024
+	tableName                  = "filer_mapping"
+)
+
+var (
+	_init_db        sync.Once
+	_db_connections []*sql.DB
+)
+
+type PostgresConf struct {
+	User               string
+	Password           string
+	HostName           string
+	Port               int
+	DataBase           string
+	SslMode            string
+	MaxIdleConnections int
+	MaxOpenConnections int
+}
+
+type ShardingConf struct {
+	IsSharding bool `json:"isSharding"`
+	ShardCount int  `json:"shardCount"`
+}
+
+type PostgresStore struct {
+	dbs        []*sql.DB
+	isSharding bool
+	shardCount int
+	server     string
+	user       string
+	password   string
+}
+
+func databaseExists(db *sql.DB, databaseName string) (bool, error) {
+	sqlStatement := "SELECT datname from pg_database WHERE datname='%s'"
+	row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName))
+
+	var dbName string
+	err := row.Scan(&dbName)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+func createDatabase(db *sql.DB, databaseName string) error {
+	sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"
+	_, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName))
+	return err
+}
+
+func getDbConnection(confs []PostgresConf) []*sql.DB {
+	_init_db.Do(func() {
+		for _, conf := range confs {
+
+			sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode)
+			glog.V(3).Infoln("Opening postgres master database")
+
+			var dbErr error
+			_db_connection, dbErr := sql.Open("postgres", sqlUrl)
+			if dbErr != nil {
+				_db_connection.Close()
+				_db_connection = nil
+				panic(dbErr)
+			}
+
+			pingErr := _db_connection.Ping()
+			if pingErr != nil {
+				_db_connection.Close()
+				_db_connection = nil
+				panic(pingErr)
+			}
+
+			glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase)
+			var existsErr error
+			dbExists, existsErr := databaseExists(_db_connection, conf.DataBase)
+			if existsErr != nil {
+				_db_connection.Close()
+				_db_connection = nil
+				panic(existsErr)
+			}
+
+			if !dbExists {
+				glog.V(3).Infoln("Database doesn't exist. 
Attempting to create one: ", conf.DataBase) + createErr := createDatabase(_db_connection, conf.DataBase) + if createErr != nil { + _db_connection.Close() + _db_connection = nil + panic(createErr) + } + } + + glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) + _db_connection.Close() + _db_connection = nil + + sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=60 fallback_application_name=filestore", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) + _db_connection, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) + } + + pingErr = _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + var maxIdleConnections, maxOpenConnections int + + if conf.MaxIdleConnections != 0 { + maxIdleConnections = conf.MaxIdleConnections + } else { + maxIdleConnections = default_maxIdleConnections + } + if conf.MaxOpenConnections != 0 { + maxOpenConnections = conf.MaxOpenConnections + } else { + maxOpenConnections = default_maxOpenConnections + } + + _db_connection.SetMaxIdleConns(maxIdleConnections) + _db_connection.SetMaxOpenConns(maxOpenConnections) + _db_connections = append(_db_connections, _db_connection) + } + }) + return _db_connections +} + +func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { + pg := &PostgresStore{ + dbs: getDbConnection(confs), + isSharding: isSharding, + shardCount: shardCount, + } + + for _, db := range pg.dbs { + if !isSharding { + pg.shardCount = 1 + } else { + if pg.shardCount == 0 { + pg.shardCount = default_maxTableNums + } + } + for i := 0; i < pg.shardCount; i++ { + if err := pg.createTables(db, tableName, i); err != nil { + fmt.Printf("create table failed %v", err) + } + } + } + + return pg +} + +func (s *PostgresStore) hash(fullFileName string) (instance_offset, table_postfix int) { + hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) + instance_offset = int(hash_value) % len(s.dbs) + table_postfix = int(hash_value) % s.shardCount + return +} + +func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { + instance_offset, table_postfix := s.hash(path) + instanceId = instance_offset + if s.isSharding { + tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) + } else { + tableFullName = tableName + } + return +} + +func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) + } + fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) + + return fid, err +} + +func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { + var tableFullName string + + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) + } + var old_fid string + if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { + return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) + } else { + if len(old_fid) == 0 { + err = s.insert(fullFilePath, 
fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } else { + err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } + } + return +} + +func (s *PostgresStore) Delete(fullFilePath string) (err error) { + var fid string + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) + } + if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) + } else if fid == "" { + return nil + } + if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) + } else { + return nil + } +} + +func (s *PostgresStore) Close() { + for _, db := range s.dbs { + db.Close() + } +} + +var createTable = ` + +CREATE TABLE IF NOT EXISTS %s ( + id BIGSERIAL NOT NULL, + uriPath VARCHAR(1024) NOT NULL DEFAULT '', + fid VARCHAR(36) NOT NULL DEFAULT '', + createTime BIGINT NOT NULL DEFAULT 0, + updateTime BIGINT NOT NULL DEFAULT 0, + remark VARCHAR(20) NOT NULL DEFAULT '', + status SMALLINT NOT NULL DEFAULT '1', + PRIMARY KEY (id), + CONSTRAINT %s_index_uriPath UNIQUE (uriPath) +); +` + +func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { + var realTableName string + if s.isSharding { + realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) + } else { + realTableName = tableName + } + + glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", realTableName) + + sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) + + stmt, err := db.Prepare(sqlCreate) + if err != nil { + return err + } + defer stmt.Close() + + _, err = stmt.Exec() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { + sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) + + row := db.QueryRow(sqlStatement, uriPath) + var fid string + err := row.Scan(&fid) + + glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) + + if err != nil { + return "", err + } + return fid, nil +} + +func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) + + glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("INSERT INTO %s (uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) + + glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) + + if err != 
nil { + return err + } + + rows, err := res.RowsAffected() + if rows != 1 { + return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) + } + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) + + glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) + + res, err := db.Exec(sqlStatement, uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} \ No newline at end of file From a1783a14cc176c9950befb33156addb249ede693 Mon Sep 17 00:00:00 2001 From: Mike Tolman Date: Wed, 7 Dec 2016 16:54:06 -0700 Subject: [PATCH 25/61] Revert "Adding PostgreSQL Filer Support" This reverts commit 855058858adf18584fe352f5e0b5271dc9585e9f. --- weed/filer/postgres_store/postgres_store.go | 365 -------------------- 1 file changed, 365 deletions(-) delete mode 100644 weed/filer/postgres_store/postgres_store.go diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go deleted file mode 100644 index c44486781..000000000 --- a/weed/filer/postgres_store/postgres_store.go +++ /dev/null @@ -1,365 +0,0 @@ -package postgres_store - -import ( - "database/sql" - "fmt" - "hash/crc32" - "sync" - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - - _ "github.com/lib/pq" -) - -const ( - default_maxIdleConnections = 100 - default_maxOpenConnections = 50 - default_maxTableNums = 1024 - tableName = "filer_mapping" -) - -var ( - _init_db sync.Once - _db_connections []*sql.DB -) - -type PostgresConf struct { - User string - Password string - HostName string - Port int - DataBase string - SslMode string - MaxIdleConnections int - MaxOpenConnections int -} - -type ShardingConf struct { - IsSharding bool `json:"isSharding"` - ShardCount int `json:"shardCount"` -} - -type PostgresStore struct { - dbs []*sql.DB - isSharding bool - shardCount int - server string - user string - password string -} - -func databaseExists(db *sql.DB, databaseName string) (bool, error) { - sqlStatement := "SELECT datname from pg_database WHERE datname='%s'" - row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName)) - - var dbName string - err := row.Scan(&dbName) - if err != nil { - if err == sql.ErrNoRows { - return false, nil - } - return false, err - } - return true, nil -} - -func createDatabase(db *sql.DB, databaseName string) (error) { - sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"; - _, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName)) - return err -} - -func getDbConnection(confs []PostgresConf) []*sql.DB { - _init_db.Do(func() { - for _, conf := range confs { - - sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) - glog.V(3).Infoln("Opening postgres master database") - - var dbErr error - _db_connection, dbErr := sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } - - pingErr := _db_connection.Ping() - if pingErr != nil { - _db_connection.Close() - _db_connection = nil - panic(pingErr) - } - - glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) - var existsErr error - dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) - if existsErr != nil { - _db_connection.Close() - _db_connection = nil - panic(existsErr) 
- } - - if !dbExists { - glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase) - createErr := createDatabase(_db_connection, conf.DataBase) - if createErr != nil { - _db_connection.Close() - _db_connection = nil - panic(createErr) - } - } - - glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) - _db_connection.Close() - _db_connection = nil - - sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=60 fallback_application_name=filestore", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) - _db_connection, dbErr = sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } - - pingErr = _db_connection.Ping() - if pingErr != nil { - _db_connection.Close() - _db_connection = nil - panic(pingErr) - } - - var maxIdleConnections, maxOpenConnections int - - if conf.MaxIdleConnections != 0 { - maxIdleConnections = conf.MaxIdleConnections - } else { - maxIdleConnections = default_maxIdleConnections - } - if conf.MaxOpenConnections != 0 { - maxOpenConnections = conf.MaxOpenConnections - } else { - maxOpenConnections = default_maxOpenConnections - } - - _db_connection.SetMaxIdleConns(maxIdleConnections) - _db_connection.SetMaxOpenConns(maxOpenConnections) - _db_connections = append(_db_connections, _db_connection) - } - }) - return _db_connections -} - -func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { - pg := &PostgresStore{ - dbs: getDbConnection(confs), - isSharding: isSharding, - shardCount: shardCount, - } - - for _, db := range pg.dbs { - if !isSharding { - pg.shardCount = 1 - } else { - if pg.shardCount == 0 { - pg.shardCount = default_maxTableNums - } - } - for i := 0; i < pg.shardCount; i++ { - if err := pg.createTables(db, tableName, i); err != nil { - fmt.Printf("create table failed %v", err) - } - } - } - - return pg -} - -func (s *PostgresStore) hash(fullFileName string) (instance_offset, table_postfix int) { - hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) - instance_offset = int(hash_value) % len(s.dbs) - table_postfix = int(hash_value) % s.shardCount - return -} - -func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { - instance_offset, table_postfix := s.hash(path) - instanceId = instance_offset - if s.isSharding { - tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) - } else { - tableFullName = tableName - } - return -} - -func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) - } - fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) - - return fid, err -} - -func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { - var tableFullName string - - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) - } - var old_fid string - if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { - return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) - 
} else { - if len(old_fid) == 0 { - err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName) - if err != nil { - return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) - } - } else { - err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) - if err != nil { - return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) - } - } - } - return -} - -func (s *PostgresStore) Delete(fullFilePath string) (err error) { - var fid string - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) - } - if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { - return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) - } else if fid == "" { - return nil - } - if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { - return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) - } else { - return nil - } -} - -func (s *PostgresStore) Close() { - for _, db := range s.dbs { - db.Close() - } -} - -var createTable = ` - -CREATE TABLE IF NOT EXISTS %s ( - id BIGSERIAL NOT NULL, - uriPath VARCHAR(1024) NOT NULL DEFAULT '', - fid VARCHAR(36) NOT NULL DEFAULT '', - createTime BIGINT NOT NULL DEFAULT 0, - updateTime BIGINT NOT NULL DEFAULT 0, - remark VARCHAR(20) NOT NULL DEFAULT '', - status SMALLINT NOT NULL DEFAULT '1', - PRIMARY KEY (id), - CONSTRAINT %s_index_uriPath UNIQUE (uriPath) -); -` - -func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { - var realTableName string - if s.isSharding { - realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) - } else { - realTableName = tableName - } - - glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", realTableName) - - sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) - - stmt, err := db.Prepare(sqlCreate) - if err != nil { - return err - } - defer stmt.Close() - - _, err = stmt.Exec() - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { - sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) - - row := db.QueryRow(sqlStatement, uriPath) - var fid string - err := row.Scan(&fid) - - glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) - - if err != nil { - return "", err - } - return fid, nil -} - -func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) - - glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) - - res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) - if err != nil { - return err - } - - _, err = res.RowsAffected() - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("INSERT INTO %s (uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) - - glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid) - - res, err := 
db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) - - if err != nil { - return err - } - - rows, err := res.RowsAffected() - if rows != 1 { - return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) - } - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) - - glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) - - res, err := db.Exec(sqlStatement, uriPath) - if err != nil { - return err - } - - _, err = res.RowsAffected() - if err != nil { - return err - } - return nil -} \ No newline at end of file From 10853e4d2f683dd91d73b02bd68974981e111698 Mon Sep 17 00:00:00 2001 From: Mike Tolman Date: Wed, 7 Dec 2016 17:13:03 -0700 Subject: [PATCH 26/61] Adding PostgreSQL Filer Support --- weed/filer/postgres_store/postgres_store.go | 365 ++++++++++++++++++++ weed/server/filer_server.go | 5 + 2 files changed, 370 insertions(+) create mode 100644 weed/filer/postgres_store/postgres_store.go diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go new file mode 100644 index 000000000..c44486781 --- /dev/null +++ b/weed/filer/postgres_store/postgres_store.go @@ -0,0 +1,365 @@ +package postgres_store + +import ( + "database/sql" + "fmt" + "hash/crc32" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + + _ "github.com/lib/pq" +) + +const ( + default_maxIdleConnections = 100 + default_maxOpenConnections = 50 + default_maxTableNums = 1024 + tableName = "filer_mapping" +) + +var ( + _init_db sync.Once + _db_connections []*sql.DB +) + +type PostgresConf struct { + User string + Password string + HostName string + Port int + DataBase string + SslMode string + MaxIdleConnections int + MaxOpenConnections int +} + +type ShardingConf struct { + IsSharding bool `json:"isSharding"` + ShardCount int `json:"shardCount"` +} + +type PostgresStore struct { + dbs []*sql.DB + isSharding bool + shardCount int + server string + user string + password string +} + +func databaseExists(db *sql.DB, databaseName string) (bool, error) { + sqlStatement := "SELECT datname from pg_database WHERE datname='%s'" + row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName)) + + var dbName string + err := row.Scan(&dbName) + if err != nil { + if err == sql.ErrNoRows { + return false, nil + } + return false, err + } + return true, nil +} + +func createDatabase(db *sql.DB, databaseName string) (error) { + sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"; + _, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName)) + return err +} + +func getDbConnection(confs []PostgresConf) []*sql.DB { + _init_db.Do(func() { + for _, conf := range confs { + + sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) + glog.V(3).Infoln("Opening postgres master database") + + var dbErr error + _db_connection, dbErr := sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) + } + + pingErr := _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) + var existsErr error + dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) + if existsErr != nil { + _db_connection.Close() + 
_db_connection = nil + panic(existsErr) + } + + if !dbExists { + glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase) + createErr := createDatabase(_db_connection, conf.DataBase) + if createErr != nil { + _db_connection.Close() + _db_connection = nil + panic(createErr) + } + } + + glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) + _db_connection.Close() + _db_connection = nil + + sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=60 fallback_application_name=filestore", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) + _db_connection, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) + } + + pingErr = _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + var maxIdleConnections, maxOpenConnections int + + if conf.MaxIdleConnections != 0 { + maxIdleConnections = conf.MaxIdleConnections + } else { + maxIdleConnections = default_maxIdleConnections + } + if conf.MaxOpenConnections != 0 { + maxOpenConnections = conf.MaxOpenConnections + } else { + maxOpenConnections = default_maxOpenConnections + } + + _db_connection.SetMaxIdleConns(maxIdleConnections) + _db_connection.SetMaxOpenConns(maxOpenConnections) + _db_connections = append(_db_connections, _db_connection) + } + }) + return _db_connections +} + +func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { + pg := &PostgresStore{ + dbs: getDbConnection(confs), + isSharding: isSharding, + shardCount: shardCount, + } + + for _, db := range pg.dbs { + if !isSharding { + pg.shardCount = 1 + } else { + if pg.shardCount == 0 { + pg.shardCount = default_maxTableNums + } + } + for i := 0; i < pg.shardCount; i++ { + if err := pg.createTables(db, tableName, i); err != nil { + fmt.Printf("create table failed %v", err) + } + } + } + + return pg +} + +func (s *PostgresStore) hash(fullFileName string) (instance_offset, table_postfix int) { + hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) + instance_offset = int(hash_value) % len(s.dbs) + table_postfix = int(hash_value) % s.shardCount + return +} + +func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { + instance_offset, table_postfix := s.hash(path) + instanceId = instance_offset + if s.isSharding { + tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) + } else { + tableFullName = tableName + } + return +} + +func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) + } + fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) + + return fid, err +} + +func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { + var tableFullName string + + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) + } + var old_fid string + if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { + return fmt.Errorf("PostgresStore Put operation failed when querying 
path %s: err is %v", fullFilePath, err) + } else { + if len(old_fid) == 0 { + err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } else { + err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } + } + return +} + +func (s *PostgresStore) Delete(fullFilePath string) (err error) { + var fid string + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) + } + if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) + } else if fid == "" { + return nil + } + if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) + } else { + return nil + } +} + +func (s *PostgresStore) Close() { + for _, db := range s.dbs { + db.Close() + } +} + +var createTable = ` + +CREATE TABLE IF NOT EXISTS %s ( + id BIGSERIAL NOT NULL, + uriPath VARCHAR(1024) NOT NULL DEFAULT '', + fid VARCHAR(36) NOT NULL DEFAULT '', + createTime BIGINT NOT NULL DEFAULT 0, + updateTime BIGINT NOT NULL DEFAULT 0, + remark VARCHAR(20) NOT NULL DEFAULT '', + status SMALLINT NOT NULL DEFAULT '1', + PRIMARY KEY (id), + CONSTRAINT %s_index_uriPath UNIQUE (uriPath) +); +` + +func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { + var realTableName string + if s.isSharding { + realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) + } else { + realTableName = tableName + } + + glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", realTableName) + + sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) + + stmt, err := db.Prepare(sqlCreate) + if err != nil { + return err + } + defer stmt.Close() + + _, err = stmt.Exec() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { + sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) + + row := db.QueryRow(sqlStatement, uriPath) + var fid string + err := row.Scan(&fid) + + glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) + + if err != nil { + return "", err + } + return fid, nil +} + +func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) + + glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("INSERT INTO %s (uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) + + glog.V(3).Infof("Postgres query -- inserting path '%s' with id 
'%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) + + if err != nil { + return err + } + + rows, err := res.RowsAffected() + if rows != 1 { + return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) + } + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) + + glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) + + res, err := db.Exec(sqlStatement, uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} \ No newline at end of file diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 959bb92cb..fc7799efc 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer/embedded_filer" "github.com/chrislusf/seaweedfs/weed/filer/flat_namespace" "github.com/chrislusf/seaweedfs/weed/filer/mysql_store" + "github.com/chrislusf/seaweedfs/weed/filer/postgres_store" "github.com/chrislusf/seaweedfs/weed/filer/redis_store" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -24,6 +25,7 @@ import ( type filerConf struct { MysqlConf []mysql_store.MySqlConf `json:"mysql"` mysql_store.ShardingConf + PostgresConf []postgres_store.PostgresConf `json:"postgres"` } func parseConfFile(confPath string) (*filerConf, error) { @@ -86,6 +88,9 @@ func NewFilerServer(r *http.ServeMux, ip string, port int, master string, dir st if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 { mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount) fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store) + } else if setting.PostgresConf != nil && len(setting.PostgresConf) != 0 { + postgres_store := postgres_store.NewPostgresStore(setting.PostgresConf, setting.IsSharding, setting.ShardCount) + fs.filer = flat_namespace.NewFlatNamespaceFiler(master, postgres_store) } else if cassandra_server != "" { cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server) if err != nil { From b18ec59c5674ee6c896a29fe91584837875eb431 Mon Sep 17 00:00:00 2001 From: Mike Tolman Date: Wed, 7 Dec 2016 17:16:10 -0700 Subject: [PATCH 27/61] Revert "Adding PostgreSQL Filer Support" This reverts commit 10853e4d2f683dd91d73b02bd68974981e111698. 
--- weed/filer/postgres_store/postgres_store.go | 365 -------------------- weed/server/filer_server.go | 5 - 2 files changed, 370 deletions(-) delete mode 100644 weed/filer/postgres_store/postgres_store.go diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go deleted file mode 100644 index c44486781..000000000 --- a/weed/filer/postgres_store/postgres_store.go +++ /dev/null @@ -1,365 +0,0 @@ -package postgres_store - -import ( - "database/sql" - "fmt" - "hash/crc32" - "sync" - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - - _ "github.com/lib/pq" -) - -const ( - default_maxIdleConnections = 100 - default_maxOpenConnections = 50 - default_maxTableNums = 1024 - tableName = "filer_mapping" -) - -var ( - _init_db sync.Once - _db_connections []*sql.DB -) - -type PostgresConf struct { - User string - Password string - HostName string - Port int - DataBase string - SslMode string - MaxIdleConnections int - MaxOpenConnections int -} - -type ShardingConf struct { - IsSharding bool `json:"isSharding"` - ShardCount int `json:"shardCount"` -} - -type PostgresStore struct { - dbs []*sql.DB - isSharding bool - shardCount int - server string - user string - password string -} - -func databaseExists(db *sql.DB, databaseName string) (bool, error) { - sqlStatement := "SELECT datname from pg_database WHERE datname='%s'" - row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName)) - - var dbName string - err := row.Scan(&dbName) - if err != nil { - if err == sql.ErrNoRows { - return false, nil - } - return false, err - } - return true, nil -} - -func createDatabase(db *sql.DB, databaseName string) (error) { - sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"; - _, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName)) - return err -} - -func getDbConnection(confs []PostgresConf) []*sql.DB { - _init_db.Do(func() { - for _, conf := range confs { - - sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) - glog.V(3).Infoln("Opening postgres master database") - - var dbErr error - _db_connection, dbErr := sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } - - pingErr := _db_connection.Ping() - if pingErr != nil { - _db_connection.Close() - _db_connection = nil - panic(pingErr) - } - - glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) - var existsErr error - dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) - if existsErr != nil { - _db_connection.Close() - _db_connection = nil - panic(existsErr) - } - - if !dbExists { - glog.V(3).Infoln("Database doesn't exist. 
Attempting to create one: ", conf.DataBase) - createErr := createDatabase(_db_connection, conf.DataBase) - if createErr != nil { - _db_connection.Close() - _db_connection = nil - panic(createErr) - } - } - - glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) - _db_connection.Close() - _db_connection = nil - - sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=60 fallback_application_name=filestore", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) - _db_connection, dbErr = sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } - - pingErr = _db_connection.Ping() - if pingErr != nil { - _db_connection.Close() - _db_connection = nil - panic(pingErr) - } - - var maxIdleConnections, maxOpenConnections int - - if conf.MaxIdleConnections != 0 { - maxIdleConnections = conf.MaxIdleConnections - } else { - maxIdleConnections = default_maxIdleConnections - } - if conf.MaxOpenConnections != 0 { - maxOpenConnections = conf.MaxOpenConnections - } else { - maxOpenConnections = default_maxOpenConnections - } - - _db_connection.SetMaxIdleConns(maxIdleConnections) - _db_connection.SetMaxOpenConns(maxOpenConnections) - _db_connections = append(_db_connections, _db_connection) - } - }) - return _db_connections -} - -func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { - pg := &PostgresStore{ - dbs: getDbConnection(confs), - isSharding: isSharding, - shardCount: shardCount, - } - - for _, db := range pg.dbs { - if !isSharding { - pg.shardCount = 1 - } else { - if pg.shardCount == 0 { - pg.shardCount = default_maxTableNums - } - } - for i := 0; i < pg.shardCount; i++ { - if err := pg.createTables(db, tableName, i); err != nil { - fmt.Printf("create table failed %v", err) - } - } - } - - return pg -} - -func (s *PostgresStore) hash(fullFileName string) (instance_offset, table_postfix int) { - hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) - instance_offset = int(hash_value) % len(s.dbs) - table_postfix = int(hash_value) % s.shardCount - return -} - -func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { - instance_offset, table_postfix := s.hash(path) - instanceId = instance_offset - if s.isSharding { - tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) - } else { - tableFullName = tableName - } - return -} - -func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) - } - fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) - - return fid, err -} - -func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { - var tableFullName string - - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) - } - var old_fid string - if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { - return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) - } else { - if len(old_fid) == 0 { - err = s.insert(fullFilePath, 
fid, s.dbs[instance_offset], tableFullName) - if err != nil { - return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) - } - } else { - err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) - if err != nil { - return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) - } - } - } - return -} - -func (s *PostgresStore) Delete(fullFilePath string) (err error) { - var fid string - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) - } - if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { - return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) - } else if fid == "" { - return nil - } - if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { - return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) - } else { - return nil - } -} - -func (s *PostgresStore) Close() { - for _, db := range s.dbs { - db.Close() - } -} - -var createTable = ` - -CREATE TABLE IF NOT EXISTS %s ( - id BIGSERIAL NOT NULL, - uriPath VARCHAR(1024) NOT NULL DEFAULT '', - fid VARCHAR(36) NOT NULL DEFAULT '', - createTime BIGINT NOT NULL DEFAULT 0, - updateTime BIGINT NOT NULL DEFAULT 0, - remark VARCHAR(20) NOT NULL DEFAULT '', - status SMALLINT NOT NULL DEFAULT '1', - PRIMARY KEY (id), - CONSTRAINT %s_index_uriPath UNIQUE (uriPath) -); -` - -func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { - var realTableName string - if s.isSharding { - realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) - } else { - realTableName = tableName - } - - glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", realTableName) - - sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) - - stmt, err := db.Prepare(sqlCreate) - if err != nil { - return err - } - defer stmt.Close() - - _, err = stmt.Exec() - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { - sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) - - row := db.QueryRow(sqlStatement, uriPath) - var fid string - err := row.Scan(&fid) - - glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) - - if err != nil { - return "", err - } - return fid, nil -} - -func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) - - glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) - - res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) - if err != nil { - return err - } - - _, err = res.RowsAffected() - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("INSERT INTO %s (uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) - - glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid) - - res, err := db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) - - if err != 
nil { - return err - } - - rows, err := res.RowsAffected() - if rows != 1 { - return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) - } - if err != nil { - return err - } - return nil -} - -func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) - - glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) - - res, err := db.Exec(sqlStatement, uriPath) - if err != nil { - return err - } - - _, err = res.RowsAffected() - if err != nil { - return err - } - return nil -} \ No newline at end of file diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index fc7799efc..959bb92cb 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -14,7 +14,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer/embedded_filer" "github.com/chrislusf/seaweedfs/weed/filer/flat_namespace" "github.com/chrislusf/seaweedfs/weed/filer/mysql_store" - "github.com/chrislusf/seaweedfs/weed/filer/postgres_store" "github.com/chrislusf/seaweedfs/weed/filer/redis_store" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -25,7 +24,6 @@ import ( type filerConf struct { MysqlConf []mysql_store.MySqlConf `json:"mysql"` mysql_store.ShardingConf - PostgresConf []postgres_store.PostgresConf `json:"postgres"` } func parseConfFile(confPath string) (*filerConf, error) { @@ -88,9 +86,6 @@ func NewFilerServer(r *http.ServeMux, ip string, port int, master string, dir st if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 { mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount) fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store) - } else if setting.PostgresConf != nil && len(setting.PostgresConf) != 0 { - postgres_store := postgres_store.NewPostgresStore(setting.PostgresConf, setting.IsSharding, setting.ShardCount) - fs.filer = flat_namespace.NewFlatNamespaceFiler(master, postgres_store) } else if cassandra_server != "" { cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server) if err != nil { From e88795be133d384813b6b0dd2d556565856cb663 Mon Sep 17 00:00:00 2001 From: Mike Tolman Date: Wed, 7 Dec 2016 17:24:40 -0700 Subject: [PATCH 28/61] Adding PostgreSQL Support --- weed/filer/postgres_store/postgres_store.go | 365 ++++++++++++++++++++ weed/server/filer_server.go | 5 + 2 files changed, 370 insertions(+) create mode 100644 weed/filer/postgres_store/postgres_store.go diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go new file mode 100644 index 000000000..9b674aedf --- /dev/null +++ b/weed/filer/postgres_store/postgres_store.go @@ -0,0 +1,365 @@ +package postgres_store + +import ( + "database/sql" + "fmt" + "hash/crc32" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + + _ "github.com/lib/pq" +) + +const ( + default_maxIdleConnections = 100 + default_maxOpenConnections = 50 + default_maxTableNums = 1024 + tableName = "filer_mapping" +) + +var ( + _init_db sync.Once + _db_connections []*sql.DB +) + +type PostgresConf struct { + User string + Password string + HostName string + Port int + DataBase string + SslMode string + MaxIdleConnections int + MaxOpenConnections int +} + +type ShardingConf struct { + IsSharding bool `json:"isSharding"` + ShardCount int `json:"shardCount"` +} + +type PostgresStore struct { + dbs []*sql.DB + isSharding bool + 
shardCount int + server string + user string + password string +} + +func databaseExists(db *sql.DB, databaseName string) (bool, error) { + sqlStatement := "SELECT datname from pg_database WHERE datname='%s'" + row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName)) + + var dbName string + err := row.Scan(&dbName) + if err != nil { + if err == sql.ErrNoRows { + return false, nil + } + return false, err + } + return true, nil +} + +func createDatabase(db *sql.DB, databaseName string) (error) { + sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"; + _, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName)) + return err +} + +func getDbConnection(confs []PostgresConf) []*sql.DB { + _init_db.Do(func() { + for _, conf := range confs { + + sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) + glog.V(3).Infoln("Opening postgres master database") + + var dbErr error + _db_connection, dbErr := sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) + } + + pingErr := _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) + var existsErr error + dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) + if existsErr != nil { + _db_connection.Close() + _db_connection = nil + panic(existsErr) + } + + if !dbExists { + glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase) + createErr := createDatabase(_db_connection, conf.DataBase) + if createErr != nil { + _db_connection.Close() + _db_connection = nil + panic(createErr) + } + } + + glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) + _db_connection.Close() + _db_connection = nil + + sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) + _db_connection, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) + } + + pingErr = _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + var maxIdleConnections, maxOpenConnections int + + if conf.MaxIdleConnections != 0 { + maxIdleConnections = conf.MaxIdleConnections + } else { + maxIdleConnections = default_maxIdleConnections + } + if conf.MaxOpenConnections != 0 { + maxOpenConnections = conf.MaxOpenConnections + } else { + maxOpenConnections = default_maxOpenConnections + } + + _db_connection.SetMaxIdleConns(maxIdleConnections) + _db_connection.SetMaxOpenConns(maxOpenConnections) + _db_connections = append(_db_connections, _db_connection) + } + }) + return _db_connections +} + +func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { + pg := &PostgresStore{ + dbs: getDbConnection(confs), + isSharding: isSharding, + shardCount: shardCount, + } + + for _, db := range pg.dbs { + if !isSharding { + pg.shardCount = 1 + } else { + if pg.shardCount == 0 { + pg.shardCount = default_maxTableNums + } + } + for i := 0; i < pg.shardCount; i++ { + if err := pg.createTables(db, tableName, i); err != nil { + fmt.Printf("create table failed %v", err) + } + } + } + + return pg +} + +func (s *PostgresStore) hash(fullFileName string) 
(instance_offset, table_postfix int) { + hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) + instance_offset = int(hash_value) % len(s.dbs) + table_postfix = int(hash_value) % s.shardCount + return +} + +func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { + instance_offset, table_postfix := s.hash(path) + instanceId = instance_offset + if s.isSharding { + tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) + } else { + tableFullName = tableName + } + return +} + +func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return "", fmt.Errorf("PostgresStore Get operation can not parse file path %s: err is %v", fullFilePath, err) + } + fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) + + return fid, err +} + +func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { + var tableFullName string + + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) + } + var old_fid string + if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { + return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) + } else { + if len(old_fid) == 0 { + err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } else { + err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + if err != nil { + return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) + } + } + } + return +} + +func (s *PostgresStore) Delete(fullFilePath string) (err error) { + var fid string + instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) + if err != nil { + return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) + } + if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) + } else if fid == "" { + return nil + } + if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) + } else { + return nil + } +} + +func (s *PostgresStore) Close() { + for _, db := range s.dbs { + db.Close() + } +} + +var createTable = ` + +CREATE TABLE IF NOT EXISTS %s ( + id BIGSERIAL NOT NULL, + uriPath VARCHAR(1024) NOT NULL DEFAULT '', + fid VARCHAR(36) NOT NULL DEFAULT '', + createTime BIGINT NOT NULL DEFAULT 0, + updateTime BIGINT NOT NULL DEFAULT 0, + remark VARCHAR(20) NOT NULL DEFAULT '', + status SMALLINT NOT NULL DEFAULT '1', + PRIMARY KEY (id), + CONSTRAINT %s_index_uriPath UNIQUE (uriPath) +); +` + +func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { + var realTableName string + if s.isSharding { + realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) + } else { + realTableName = tableName + } + + glog.V(3).Infoln("Creating 
postgres table if it doesn't exist: ", realTableName) + + sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) + + stmt, err := db.Prepare(sqlCreate) + if err != nil { + return err + } + defer stmt.Close() + + _, err = stmt.Exec() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { + sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) + + row := db.QueryRow(sqlStatement, uriPath) + var fid string + err := row.Scan(&fid) + + glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) + + if err != nil { + return "", err + } + return fid, nil +} + +func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) + + glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("INSERT INTO %s (uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) + + glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid) + + res, err := db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) + + if err != nil { + return err + } + + rows, err := res.RowsAffected() + if rows != 1 { + return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) + } + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { + sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) + + glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) + + res, err := db.Exec(sqlStatement, uriPath) + if err != nil { + return err + } + + _, err = res.RowsAffected() + if err != nil { + return err + } + return nil +} \ No newline at end of file diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 959bb92cb..fc7799efc 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer/embedded_filer" "github.com/chrislusf/seaweedfs/weed/filer/flat_namespace" "github.com/chrislusf/seaweedfs/weed/filer/mysql_store" + "github.com/chrislusf/seaweedfs/weed/filer/postgres_store" "github.com/chrislusf/seaweedfs/weed/filer/redis_store" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -24,6 +25,7 @@ import ( type filerConf struct { MysqlConf []mysql_store.MySqlConf `json:"mysql"` mysql_store.ShardingConf + PostgresConf []postgres_store.PostgresConf `json:"postgres"` } func parseConfFile(confPath string) (*filerConf, error) { @@ -86,6 +88,9 @@ func NewFilerServer(r *http.ServeMux, ip string, port int, master string, dir st if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 { mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount) fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store) + } else if setting.PostgresConf != nil && len(setting.PostgresConf) != 0 { + postgres_store := postgres_store.NewPostgresStore(setting.PostgresConf, setting.IsSharding, setting.ShardCount) + 
fs.filer = flat_namespace.NewFlatNamespaceFiler(master, postgres_store)
 	} else if cassandra_server != "" {
 		cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server)
 		if err != nil {

From 43e655afc876f443c896300c98fc9d1d9469340f Mon Sep 17 00:00:00 2001
From: Mike Tolman
Date: Mon, 12 Dec 2016 13:03:32 -0700
Subject: [PATCH 29/61] Updating PostgreSQL file store integration to support
 directories.

---
 weed/filer/postgres_store/postgres_store.go | 616 ++++++++++++++------
 weed/server/filer_server.go                 |   7 +-
 2 files changed, 440 insertions(+), 183 deletions(-)

diff --git a/weed/filer/postgres_store/postgres_store.go b/weed/filer/postgres_store/postgres_store.go
index 9b674aedf..68a6298ee 100644
--- a/weed/filer/postgres_store/postgres_store.go
+++ b/weed/filer/postgres_store/postgres_store.go
@@ -2,26 +2,30 @@ package postgres_store

 import (
 	"database/sql"
+	"errors"
 	"fmt"
-	"hash/crc32"
+	"path/filepath"
 	"sync"
 	"time"

+	"github.com/chrislusf/seaweedfs/weed/filer"
 	"github.com/chrislusf/seaweedfs/weed/glog"

 	_ "github.com/lib/pq"
+	_ "path/filepath"
+	"strings"
 )

 const (
 	default_maxIdleConnections = 100
 	default_maxOpenConnections = 50
-	default_maxTableNums = 1024
-	tableName = "filer_mapping"
+	filesTableName = "files"
+	directoriesTableName = "directories"
 )

 var (
-	_init_db sync.Once
-	_db_connections []*sql.DB
+	_init_db sync.Once
+	_db_connection *sql.DB
 )

 type PostgresConf struct {
@@ -30,27 +34,60 @@ type PostgresConf struct {
 	HostName           string
 	Port               int
 	DataBase           string
-	SslMode            string
+	SslMode  string
 	MaxIdleConnections int
 	MaxOpenConnections int
 }

-type ShardingConf struct {
-	IsSharding bool `json:"isSharding"`
-	ShardCount int  `json:"shardCount"`
-}
-
 type PostgresStore struct {
-	dbs        []*sql.DB
-	isSharding bool
-	shardCount int
-	server     string
-	user       string
+	db       *sql.DB
+	server   string
+	user     string
 	password string
 }

+func (s *PostgresStore) CreateFile(fullFileName string, fid string) (err error) {
+	glog.V(3).Infoln("Calling postgres_store CreateFile")
+	return s.Put(fullFileName, fid)
+}
+
+func (s *PostgresStore) FindFile(fullFileName string) (fid string, err error) {
+	glog.V(3).Infoln("Calling postgres_store FindFile")
+	return s.Get(fullFileName)
+}
+
+func (s *PostgresStore) DeleteFile(fullFileName string) (fid string, err error) {
+	glog.V(3).Infoln("Calling postgres_store DeleteFile")
+	return "", s.Delete(fullFileName)
+}
+
+func (s *PostgresStore) FindDirectory(dirPath string) (dirId filer.DirectoryId, err error) {
+	glog.V(3).Infoln("Calling postgres_store FindDirectory")
+	return s.FindDir(dirPath)
+}
+
+func (s *PostgresStore) ListDirectories(dirPath string) (dirs []filer.DirectoryEntry, err error) {
+	glog.V(3).Infoln("Calling postgres_store ListDirectories")
+	return s.ListDirs(dirPath)
+}
+
+func (s *PostgresStore) ListFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) {
+	glog.V(3).Infoln("Calling postgres_store ListFiles")
+	return s.FindFiles(dirPath, lastFileName, limit)
+}
+
+func (s *PostgresStore) DeleteDirectory(dirPath string, recursive bool) (err error) {
+	glog.V(3).Infoln("Calling postgres_store DeleteDirectory")
+	return s.DeleteDir(dirPath, recursive)
+}
+
+func (s *PostgresStore) Move(fromPath string, toPath string) (err error) {
+	glog.V(3).Infoln("Calling postgres_store Move")
+	return errors.New("Move is not yet implemented for the PostgreSQL store.")
+}
+
 func databaseExists(db *sql.DB, databaseName string) (bool, error) {
-	sqlStatement := "SELECT datname from pg_database WHERE 
datname='%s'" + sqlStatement := "SELECT datname from pg_database WHERE datname='%s'" row := db.QueryRow(fmt.Sprintf(sqlStatement, databaseName)) var dbName string @@ -64,164 +101,126 @@ func databaseExists(db *sql.DB, databaseName string) (bool, error) { return true, nil } -func createDatabase(db *sql.DB, databaseName string) (error) { - sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'"; +func createDatabase(db *sql.DB, databaseName string) error { + sqlStatement := "CREATE DATABASE %s ENCODING='UTF8'" _, err := db.Exec(fmt.Sprintf(sqlStatement, databaseName)) return err } -func getDbConnection(confs []PostgresConf) []*sql.DB { +func getDbConnection(conf PostgresConf) *sql.DB { _init_db.Do(func() { - for _, conf := range confs { - - sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) - glog.V(3).Infoln("Opening postgres master database") - - var dbErr error - _db_connection, dbErr := sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } - - pingErr := _db_connection.Ping() - if pingErr != nil { - _db_connection.Close() - _db_connection = nil - panic(pingErr) - } - - glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) - var existsErr error - dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) - if existsErr != nil { - _db_connection.Close() - _db_connection = nil - panic(existsErr) - } - - if !dbExists { - glog.V(3).Infoln("Database doesn't exist. Attempting to create one: ", conf.DataBase) - createErr := createDatabase(_db_connection, conf.DataBase) - if createErr != nil { - _db_connection.Close() - _db_connection = nil - panic(createErr) - } - } - glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) + sqlUrl := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, "postgres", conf.SslMode) + glog.V(3).Infoln("Opening postgres master database") + + var dbErr error + _db_connection, dbErr := sql.Open("postgres", sqlUrl) + if dbErr != nil { _db_connection.Close() _db_connection = nil - - sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) - _db_connection, dbErr = sql.Open("postgres", sqlUrl) - if dbErr != nil { - _db_connection.Close() - _db_connection = nil - panic(dbErr) - } + panic(dbErr) + } + + pingErr := _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } + + glog.V(3).Infoln("Checking to see if DB exists: ", conf.DataBase) + var existsErr error + dbExists, existsErr := databaseExists(_db_connection, conf.DataBase) + if existsErr != nil { + _db_connection.Close() + _db_connection = nil + panic(existsErr) + } - pingErr = _db_connection.Ping() - if pingErr != nil { + if !dbExists { + glog.V(3).Infoln("Database doesn't exist. 
Attempting to create one: ", conf.DataBase) + createErr := createDatabase(_db_connection, conf.DataBase) + if createErr != nil { _db_connection.Close() _db_connection = nil - panic(pingErr) + panic(createErr) } - - var maxIdleConnections, maxOpenConnections int + } - if conf.MaxIdleConnections != 0 { - maxIdleConnections = conf.MaxIdleConnections - } else { - maxIdleConnections = default_maxIdleConnections - } - if conf.MaxOpenConnections != 0 { - maxOpenConnections = conf.MaxOpenConnections - } else { - maxOpenConnections = default_maxOpenConnections - } + glog.V(3).Infoln("Closing master postgres database and opening configured database: ", conf.DataBase) + _db_connection.Close() + _db_connection = nil - _db_connection.SetMaxIdleConns(maxIdleConnections) - _db_connection.SetMaxOpenConns(maxOpenConnections) - _db_connections = append(_db_connections, _db_connection) + sqlUrl = fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30", conf.HostName, conf.Port, conf.User, conf.Password, conf.DataBase, conf.SslMode) + _db_connection, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + _db_connection.Close() + _db_connection = nil + panic(dbErr) } - }) - return _db_connections -} -func NewPostgresStore(confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { - pg := &PostgresStore{ - dbs: getDbConnection(confs), - isSharding: isSharding, - shardCount: shardCount, - } + pingErr = _db_connection.Ping() + if pingErr != nil { + _db_connection.Close() + _db_connection = nil + panic(pingErr) + } - for _, db := range pg.dbs { - if !isSharding { - pg.shardCount = 1 + var maxIdleConnections, maxOpenConnections int + + if conf.MaxIdleConnections != 0 { + maxIdleConnections = conf.MaxIdleConnections } else { - if pg.shardCount == 0 { - pg.shardCount = default_maxTableNums - } + maxIdleConnections = default_maxIdleConnections } - for i := 0; i < pg.shardCount; i++ { - if err := pg.createTables(db, tableName, i); err != nil { - fmt.Printf("create table failed %v", err) - } + if conf.MaxOpenConnections != 0 { + maxOpenConnections = conf.MaxOpenConnections + } else { + maxOpenConnections = default_maxOpenConnections } - } - - return pg -} -func (s *PostgresStore) hash(fullFileName string) (instance_offset, table_postfix int) { - hash_value := crc32.ChecksumIEEE([]byte(fullFileName)) - instance_offset = int(hash_value) % len(s.dbs) - table_postfix = int(hash_value) % s.shardCount - return + _db_connection.SetMaxIdleConns(maxIdleConnections) + _db_connection.SetMaxOpenConns(maxOpenConnections) + }) + return _db_connection } -func (s *PostgresStore) parseFilerMappingInfo(path string) (instanceId int, tableFullName string, err error) { - instance_offset, table_postfix := s.hash(path) - instanceId = instance_offset - if s.isSharding { - tableFullName = fmt.Sprintf("%s_%04d", tableName, table_postfix) - } else { - tableFullName = tableName +//func NewPostgresStore(master string, confs []PostgresConf, isSharding bool, shardCount int) *PostgresStore { +func NewPostgresStore(master string, conf PostgresConf) *PostgresStore { + pg := &PostgresStore{ + db: getDbConnection(conf), } - return + + pg.createDirectoriesTable() + + if err := pg.createFilesTable(); err != nil { + fmt.Printf("create table failed %v", err) + } + + return pg } func (s *PostgresStore) Get(fullFilePath string) (fid string, err error) { - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) if err != nil { return "", fmt.Errorf("PostgresStore Get operation can not parse 
file path %s: err is %v", fullFilePath, err) } - fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName) - + fid, err = s.query(fullFilePath) + return fid, err } func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { - var tableFullName string - - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) - if err != nil { - return fmt.Errorf("PostgresStore Put operation can not parse file path %s: err is %v", fullFilePath, err) - } var old_fid string - if old_fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil && err != sql.ErrNoRows { + if old_fid, err = s.query(fullFilePath); err != nil && err != sql.ErrNoRows { return fmt.Errorf("PostgresStore Put operation failed when querying path %s: err is %v", fullFilePath, err) } else { if len(old_fid) == 0 { - err = s.insert(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + err = s.insert(fullFilePath, fid) if err != nil { return fmt.Errorf("PostgresStore Put operation failed when inserting path %s with fid %s : err is %v", fullFilePath, fid, err) } } else { - err = s.update(fullFilePath, fid, s.dbs[instance_offset], tableFullName) + err = s.update(fullFilePath, fid) if err != nil { return fmt.Errorf("PostgresStore Put operation failed when updating path %s with fid %s : err is %v", fullFilePath, fid, err) } @@ -232,16 +231,15 @@ func (s *PostgresStore) Put(fullFilePath string, fid string) (err error) { func (s *PostgresStore) Delete(fullFilePath string) (err error) { var fid string - instance_offset, tableFullName, err := s.parseFilerMappingInfo(fullFilePath) if err != nil { return fmt.Errorf("PostgresStore Delete operation can not parse file path %s: err is %v", fullFilePath, err) } - if fid, err = s.query(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + if fid, err = s.query(fullFilePath); err != nil { return fmt.Errorf("PostgresStore Delete operation failed when querying path %s: err is %v", fullFilePath, err) } else if fid == "" { return nil } - if err = s.delete(fullFilePath, s.dbs[instance_offset], tableFullName); err != nil { + if err = s.delete(fullFilePath); err != nil { return fmt.Errorf("PostgresStore Delete operation failed when deleting path %s: err is %v", fullFilePath, err) } else { return nil @@ -249,39 +247,87 @@ func (s *PostgresStore) Delete(fullFilePath string) (err error) { } func (s *PostgresStore) Close() { - for _, db := range s.dbs { - db.Close() + s.db.Close() +} + +func (s *PostgresStore) FindDir(dirPath string) (dirId filer.DirectoryId, err error) { + dirId, _, err = s.lookupDirectory(dirPath) + return dirId, err +} + +func (s *PostgresStore) ListDirs(dirPath string) (dirs []filer.DirectoryEntry, err error) { + dirs, err = s.findDirectories(dirPath, 20) + + glog.V(3).Infof("Postgres ListDirs = found %d directories under %s", len(dirs), dirPath) + + return dirs, err +} + +func (s *PostgresStore) DeleteDir(dirPath string, recursive bool) (err error) { + err = s.deleteDirectory(dirPath, recursive) + if err != nil { + glog.V(0).Infof("Error in Postgres DeleteDir '%s' (recursive = '%t'): %s", err) } + return err } -var createTable = ` +func (s *PostgresStore) FindFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) { + files, err = s.findFiles(dirPath, lastFileName, limit) + + return files, err +} + +var createDirectoryTable = ` CREATE TABLE IF NOT EXISTS %s ( id BIGSERIAL NOT NULL, - uriPath VARCHAR(1024) NOT NULL DEFAULT '', + directoryRoot VARCHAR(1024) NOT NULL 
DEFAULT '', + directoryName VARCHAR(1024) NOT NULL DEFAULT '', + CONSTRAINT unique_directory UNIQUE (directoryRoot, directoryName) +); +` + +var createFileTable = ` + +CREATE TABLE IF NOT EXISTS %s ( + id BIGSERIAL NOT NULL, + directoryPart VARCHAR(1024) NOT NULL DEFAULT '', + filePart VARCHAR(1024) NOT NULL DEFAULT '', fid VARCHAR(36) NOT NULL DEFAULT '', createTime BIGINT NOT NULL DEFAULT 0, updateTime BIGINT NOT NULL DEFAULT 0, remark VARCHAR(20) NOT NULL DEFAULT '', status SMALLINT NOT NULL DEFAULT '1', PRIMARY KEY (id), - CONSTRAINT %s_index_uriPath UNIQUE (uriPath) + CONSTRAINT %s_unique_file UNIQUE (directoryPart, filePart) ); ` -func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) error { - var realTableName string - if s.isSharding { - realTableName = fmt.Sprintf("%s_%04d", tableName, postfix) - } else { - realTableName = tableName - } - - glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", realTableName) - - sqlCreate := fmt.Sprintf(createTable, realTableName, realTableName) - - stmt, err := db.Prepare(sqlCreate) +func (s *PostgresStore) createDirectoriesTable() error { + glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", directoriesTableName) + + sqlCreate := fmt.Sprintf(createDirectoryTable, directoriesTableName) + + stmt, err := s.db.Prepare(sqlCreate) + if err != nil { + return err + } + defer stmt.Close() + + _, err = stmt.Exec() + if err != nil { + return err + } + return nil +} + +func (s *PostgresStore) createFilesTable() error { + + glog.V(3).Infoln("Creating postgres table if it doesn't exist: ", filesTableName) + + sqlCreate := fmt.Sprintf(createFileTable, filesTableName, filesTableName) + + stmt, err := s.db.Prepare(sqlCreate) if err != nil { return err } @@ -294,27 +340,29 @@ func (s *PostgresStore) createTables(db *sql.DB, tableName string, postfix int) return nil } -func (s *PostgresStore) query(uriPath string, db *sql.DB, tableName string) (string, error) { - sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE uriPath=$1", tableName) - - row := db.QueryRow(sqlStatement, uriPath) +func (s *PostgresStore) query(uriPath string) (string, error) { + directoryPart, filePart := filepath.Split(uriPath) + sqlStatement := fmt.Sprintf("SELECT fid FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName) + + row := s.db.QueryRow(sqlStatement, directoryPart, filePart) var fid string err := row.Scan(&fid) - + glog.V(3).Infof("Postgres query -- looking up path '%s' and found id '%s' ", uriPath, fid) - + if err != nil { return "", err } return fid, nil } -func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE uriPath=$3", tableName) - +func (s *PostgresStore) update(uriPath string, fid string) error { + directoryPart, filePart := filepath.Split(uriPath) + sqlStatement := fmt.Sprintf("UPDATE %s SET fid=$1, updateTime=$2 WHERE directoryPart=$3 AND filePart=$4", filesTableName) + glog.V(3).Infof("Postgres query -- updating path '%s' with id '%s'", uriPath, fid) - - res, err := db.Exec(sqlStatement, fid, time.Now().Unix(), uriPath) + + res, err := s.db.Exec(sqlStatement, fid, time.Now().Unix(), directoryPart, filePart) if err != nil { return err } @@ -326,33 +374,83 @@ func (s *PostgresStore) update(uriPath string, fid string, db *sql.DB, tableName return nil } -func (s *PostgresStore) insert(uriPath string, fid string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("INSERT INTO %s 
(uriPath,fid,createTime) VALUES($1, $2, $3)", tableName) - +func (s *PostgresStore) insert(uriPath string, fid string) error { + directoryPart, filePart := filepath.Split(uriPath) + + existingId, _, _ := s.lookupDirectory(directoryPart) + if existingId == 0 { + s.recursiveInsertDirectory(directoryPart) + } + + sqlStatement := fmt.Sprintf("INSERT INTO %s (directoryPart,filePart,fid,createTime) VALUES($1, $2, $3, $4)", filesTableName) glog.V(3).Infof("Postgres query -- inserting path '%s' with id '%s'", uriPath, fid) - - res, err := db.Exec(sqlStatement, uriPath, fid, time.Now().Unix()) - + + res, err := s.db.Exec(sqlStatement, directoryPart, filePart, fid, time.Now().Unix()) + if err != nil { return err } - rows, err := res.RowsAffected() + rows, err := res.RowsAffected() if rows != 1 { return fmt.Errorf("Postgres insert -- rows affected = %d. Expecting 1", rows) - } + } if err != nil { return err } + return nil } -func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) error { - sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE uriPath=$1", tableName) - +func (s *PostgresStore) recursiveInsertDirectory(dirPath string) { + pathParts := strings.Split(dirPath, "/") + + var workingPath string = "/" + for _, part := range pathParts { + if part == "" { + continue + } + workingPath += (part + "/") + existingId, _, _ := s.lookupDirectory(workingPath) + if existingId == 0 { + s.insertDirectory(workingPath) + } + } +} + +func (s *PostgresStore) insertDirectory(dirPath string) { + pathParts := strings.Split(dirPath, "/") + + directoryRoot := "/" + directoryName := "" + if len(pathParts) > 1 { + directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/" + directoryName = strings.Join(pathParts[len(pathParts)-2:], "/") + } else if len(pathParts) == 1 { + directoryRoot = "/" + directoryName = pathParts[0] + "/" + } + sqlInsertDirectoryStatement := fmt.Sprintf("INSERT INTO %s (directoryroot, directoryname) "+ + "SELECT $1, $2 WHERE NOT EXISTS ( SELECT id FROM %s WHERE directoryroot=$3 AND directoryname=$4 )", + directoriesTableName, directoriesTableName) + + glog.V(4).Infof("Postgres query -- Inserting directory (if it doesn't exist) - root = %s, name = %s", + directoryRoot, directoryName) + + _, err := s.db.Exec(sqlInsertDirectoryStatement, directoryRoot, directoryName, directoryRoot, directoryName) + if err != nil { + glog.V(0).Infof("Postgres query -- Error inserting directory - root = %s, name = %s: %s", + directoryRoot, directoryName, err) + } +} + +func (s *PostgresStore) delete(uriPath string) error { + directoryPart, filePart := filepath.Split(uriPath) + sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directoryPart=$1 AND filePart=$2", filesTableName) + glog.V(3).Infof("Postgres query -- deleting path '%s'", uriPath) - - res, err := db.Exec(sqlStatement, uriPath) + + res, err := s.db.Exec(sqlStatement, directoryPart, filePart) if err != nil { return err } @@ -362,4 +460,164 @@ func (s *PostgresStore) delete(uriPath string, db *sql.DB, tableName string) err return err } return nil -} \ No newline at end of file +} + +func (s *PostgresStore) lookupDirectory(dirPath string) (filer.DirectoryId, string, error) { + directoryRoot, directoryName := s.mySplitPath(dirPath) + + sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName) + + row := s.db.QueryRow(sqlStatement, directoryRoot, directoryName) + var id filer.DirectoryId + var dirRoot string + var dirName string + err := 
row.Scan(&id, &dirRoot, &dirName) + + glog.V(3).Infof("Postgres lookupDirectory -- looking up directory '%s' and found id '%d', root '%s', name '%s' ", dirPath, id, dirRoot, dirName) + + if err != nil { + return 0, "", err + } + return id, filepath.Join(dirRoot, dirName), err +} + +func (s *PostgresStore) findDirectories(dirPath string, limit int) (dirs []filer.DirectoryEntry, err error) { + sqlStatement := fmt.Sprintf("SELECT id, directoryroot, directoryname FROM %s WHERE directoryRoot=$1 AND directoryName != '' ORDER BY id LIMIT $2", directoriesTableName) + rows, err := s.db.Query(sqlStatement, dirPath, limit) + + if err != nil { + glog.V(0).Infof("Postgres findDirectories error: %s", err) + } + + if rows != nil { + defer rows.Close() + for rows.Next() { + var id filer.DirectoryId + var directoryRoot string + var directoryName string + + scanErr := rows.Scan(&id, &directoryRoot, &directoryName) + if scanErr != nil { + err = scanErr + } + dirs = append(dirs, filer.DirectoryEntry{Name: (directoryName), Id: id}) + } + } + return +} + +func (s *PostgresStore) safeToDeleteDirectory(dirPath string, recursive bool) bool { + if recursive { + return true + } + sqlStatement := fmt.Sprintf("SELECT id FROM %s WHERE directoryRoot LIKE $1 LIMIT 1", directoriesTableName) + row := s.db.QueryRow(sqlStatement, dirPath+"%") + + var id filer.DirectoryId + err := row.Scan(&id) + if err != nil { + if err == sql.ErrNoRows { + return true + } + } + return false +} + +func (s *PostgresStore) mySplitPath(dirPath string) (directoryRoot string, directoryName string) { + pathParts := strings.Split(dirPath, "/") + directoryRoot = "/" + directoryName = "" + if len(pathParts) > 1 { + directoryRoot = strings.Join(pathParts[0:len(pathParts)-2], "/") + "/" + directoryName = strings.Join(pathParts[len(pathParts)-2:], "/") + } else if len(pathParts) == 1 { + directoryRoot = "/" + directoryName = pathParts[0] + "/" + } + return directoryRoot, directoryName +} + +func (s *PostgresStore) deleteDirectory(dirPath string, recursive bool) (err error) { + directoryRoot, directoryName := s.mySplitPath(dirPath) + + // delete files + sqlStatement := fmt.Sprintf("DELETE FROM %s WHERE directorypart=$1", filesTableName) + _, err = s.db.Exec(sqlStatement, dirPath) + if err != nil { + return err + } + + // delete specific directory if it is empty or recursive delete was requested + safeToDelete := s.safeToDeleteDirectory(dirPath, recursive) + if safeToDelete { + sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot=$1 AND directoryName=$2", directoriesTableName) + _, err = s.db.Exec(sqlStatement, directoryRoot, directoryName) + if err != nil { + return err + } + } + + if recursive { + // delete descendant files + sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directorypart LIKE $1", filesTableName) + _, err = s.db.Exec(sqlStatement, dirPath+"%") + if err != nil { + return err + } + + // delete descendant directories + sqlStatement = fmt.Sprintf("DELETE FROM %s WHERE directoryRoot LIKE $1", directoriesTableName) + _, err = s.db.Exec(sqlStatement, dirPath+"%") + if err != nil { + return err + } + } + + return err +} + +func (s *PostgresStore) findFiles(dirPath string, lastFileName string, limit int) (files []filer.FileEntry, err error) { + var rows *sql.Rows = nil + + if lastFileName == "" { + sqlStatement := + fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 ORDER BY id LIMIT $2", filesTableName) + rows, err = s.db.Query(sqlStatement, dirPath, limit) + } else { + sqlStatement := + 
fmt.Sprintf("SELECT fid, directorypart, filepart FROM %s WHERE directorypart=$1 "+ + "AND id > (SELECT id FROM %s WHERE directoryPart=$2 AND filepart=$3) ORDER BY id LIMIT $4", + filesTableName, filesTableName) + _, lastFileNameName := filepath.Split(lastFileName) + rows, err = s.db.Query(sqlStatement, dirPath, dirPath, lastFileNameName, limit) + } + + if err != nil { + glog.V(0).Infof("Postgres find files error: %s", err) + } + + if rows != nil { + defer rows.Close() + + for rows.Next() { + var fid filer.FileId + var directoryPart string + var filePart string + + scanErr := rows.Scan(&fid, &directoryPart, &filePart) + if scanErr != nil { + err = scanErr + } + + files = append(files, filer.FileEntry{Name: filepath.Join(directoryPart, filePart), Id: fid}) + if len(files) >= limit { + break + } + } + } + + glog.V(3).Infof("Postgres findFiles -- looking up files under '%s' and found %d files. Limit=%d, lastFileName=%s", + dirPath, len(files), limit, lastFileName) + + return files, err +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index fc7799efc..4a0b2103b 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -25,7 +25,7 @@ import ( type filerConf struct { MysqlConf []mysql_store.MySqlConf `json:"mysql"` mysql_store.ShardingConf - PostgresConf []postgres_store.PostgresConf `json:"postgres"` + PostgresConf *postgres_store.PostgresConf `json:"postgres"` } func parseConfFile(confPath string) (*filerConf, error) { @@ -88,9 +88,8 @@ func NewFilerServer(r *http.ServeMux, ip string, port int, master string, dir st if setting.MysqlConf != nil && len(setting.MysqlConf) != 0 { mysql_store := mysql_store.NewMysqlStore(setting.MysqlConf, setting.IsSharding, setting.ShardCount) fs.filer = flat_namespace.NewFlatNamespaceFiler(master, mysql_store) - } else if setting.PostgresConf != nil && len(setting.PostgresConf) != 0 { - postgres_store := postgres_store.NewPostgresStore(setting.PostgresConf, setting.IsSharding, setting.ShardCount) - fs.filer = flat_namespace.NewFlatNamespaceFiler(master, postgres_store) + } else if setting.PostgresConf != nil { + fs.filer = postgres_store.NewPostgresStore(master, *setting.PostgresConf) } else if cassandra_server != "" { cassandra_store, err := cassandra_store.NewCassandraStore(cassandra_keyspace, cassandra_server) if err != nil { From 5b18cf09e7643d08f28f0e963c56474eafb35747 Mon Sep 17 00:00:00 2001 From: listeng Date: Mon, 26 Dec 2016 16:49:43 +0800 Subject: [PATCH 30/61] Return json data when content-type is "application/json" Before #344, the filer will return json data, and cschiano make a template that render to html. But sometimes need json data, so I add some code will return json data when content-type is "application/json". 
--- weed/server/filer_server_handlers_read.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index bf95e37b9..5bd59b23f 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -74,7 +74,12 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName, shouldDisplayLoadMore, } - ui.StatusTpl.Execute(w, args) + + if strings.ToLower(r.Header.Get("Content-Type")) == "application/json" { + writeJsonQuiet(w, r, http.StatusOK, args) + } else { + ui.StatusTpl.Execute(w, args) + } } func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { From d96d0a87cfb9aa26b83702a72a3affe6d72697b3 Mon Sep 17 00:00:00 2001 From: lixianbin Date: Wed, 4 Jan 2017 11:23:40 +0800 Subject: [PATCH 31/61] fix bug: upload big .gz file more than maxMB --- weed/command/upload.go | 2 +- weed/operation/submit.go | 11 +++----- weed/storage/needle.go | 54 +++++++++++++++++++++------------------- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/weed/command/upload.go b/weed/command/upload.go index 1f0696f70..d7a468610 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -63,7 +63,7 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { secret := security.Secret(*upload.secretKey) - if len(cmdUpload.Flag.Args()) == 0 { + if len(args) == 0 { if *upload.dir == "" { return false } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 54b6e164e..1de6b544a 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -92,18 +92,15 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { } ret.Reader = fh - if fi, fiErr := fh.Stat(); fiErr != nil { + fi, fiErr := fh.Stat() + if fiErr != nil { glog.V(0).Info("Failed to stat file:", fullPathFilename) return ret, fiErr - } else { - ret.ModTime = fi.ModTime().UTC().Unix() - ret.FileSize = fi.Size() } + ret.ModTime = fi.ModTime().UTC().Unix() + ret.FileSize = fi.Size() ext := strings.ToLower(path.Ext(fullPathFilename)) ret.IsGzipped = ext == ".gz" - if ret.IsGzipped { - ret.FileName = fullPathFilename[0 : len(fullPathFilename)-3] - } ret.FileName = fullPathFilename if ext != "" { ret.MimeType = mime.TypeByExtension(ext) diff --git a/weed/storage/needle.go b/weed/storage/needle.go index 29549b323..daa050be8 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -106,35 +106,39 @@ func ParseUpload(r *http.Request) ( } } - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } - if part.Header.Get("Content-Encoding") == "gzip" { - isGzipped = true - } else if operation.IsGzippable(ext, mtype) { - if data, e = operation.GzipData(data); e != nil { - return + isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) + isGzipped = false + if !isChunkedFile { + dotIndex := strings.LastIndex(fileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(fileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && mtype != contentType { + mimeType = contentType 
//only return mime type if not deductable + mtype = contentType + } + if part.Header.Get("Content-Encoding") == "gzip" { + isGzipped = true + } else if operation.IsGzippable(ext, mtype) { + if data, e = operation.GzipData(data); e != nil { + return + } + isGzipped = true + } + if ext == ".gz" { + isGzipped = true + } + if strings.HasSuffix(fileName, ".gz") && + !strings.HasSuffix(fileName, ".tar.gz") { + fileName = fileName[:len(fileName)-3] } - isGzipped = true - } - if ext == ".gz" { - isGzipped = true - } - if strings.HasSuffix(fileName, ".gz") && - !strings.HasSuffix(fileName, ".tar.gz") { - fileName = fileName[:len(fileName)-3] } modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) ttl, _ = ReadTTL(r.FormValue("ttl")) - isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) + return } func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) { From 2d13382c68c6337145ab3970d3930ca093b01ce9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 21:14:46 -0800 Subject: [PATCH 32/61] add releasing configs --- .gitignore | 1 + .travis.yml | 27 +++++++++++++++++++++------ Makefile | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index cb0085ab4..4e904d723 100644 --- a/.gitignore +++ b/.gitignore @@ -76,3 +76,4 @@ crashlytics.properties crashlytics-build.properties test_data +build diff --git a/.travis.yml b/.travis.yml index 2344d4850..c72eb8fd1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,30 @@ sudo: false language: go go: - - 1.5 - - 1.6 - - tip +- 1.5 +- 1.6 +- tip before_install: - - export PATH=/home/travis/gopath/bin:$PATH +- export PATH=/home/travis/gopath/bin:$PATH install: - - go get ./weed/... +- go get ./weed/... script: - - go test ./weed/... +- go test ./weed/... 
+ +deploy: + provider: releases + api_key: + secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI= + file: + - build/linux_arm.tar.gz + - build/linux_arm64.tar.gz + - build/linux_386.tar.gz + - build/linux_amd64.tar.gz + - build/darwin_amd64.tar.gz + - build/windows_386.zip + - build/windows_amd64.zip + on: + repo: chrislusf/seaweedfs diff --git a/Makefile b/Makefile index 4d09803fc..0ed98b9ec 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,26 @@ BINARY = weed/weed +package = github.com/chrislusf/seaweedfs/weed GO_FLAGS = #-v SOURCE_DIR = ./weed/ +appname := weed + +sources := $(wildcard *.go) + +build = GOOS=$(1) GOARCH=$(2) go build -o build/$(appname)$(3) $(SOURCE_DIR) +tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) +zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) + + all: build -.PHONY : clean deps build linux +.PHONY : clean deps build linux release windows_build darwin_build linux_build clean clean: go clean -i $(GO_FLAGS) $(SOURCE_DIR) rm -f $(BINARY) + rm -rf build/ deps: go get $(GO_FLAGS) -d $(SOURCE_DIR) @@ -20,3 +31,42 @@ build: deps linux: deps mkdir -p linux GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) + +release: windows_build darwin_build linux_build + +##### LINUX BUILDS ##### +linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz + +build/linux_386.tar.gz: $(sources) + $(call build,linux,386,) + $(call tar,linux,386) + +build/linux_amd64.tar.gz: $(sources) + $(call build,linux,amd64,) + $(call tar,linux,amd64) + +build/linux_arm.tar.gz: $(sources) + $(call build,linux,arm,) + $(call tar,linux,arm) + +build/linux_arm64.tar.gz: $(sources) + $(call build,linux,arm64,) + $(call tar,linux,arm64) + +##### DARWIN (MAC) BUILDS ##### +darwin_build: build/darwin_amd64.tar.gz + +build/darwin_amd64.tar.gz: $(sources) + $(call build,darwin,amd64,) + $(call tar,darwin,amd64) + +##### WINDOWS BUILDS ##### +windows_build: build/windows_386.zip build/windows_amd64.zip + +build/windows_386.zip: $(sources) + $(call build,windows,386,.exe) + $(call zip,windows,386,.exe) + +build/windows_amd64.zip: $(sources) + $(call build,windows,amd64,.exe) + $(call zip,windows,amd64,.exe) \ No newline at end of file From 8fb29e523009901d7bcc5adff8a3da83ea515a18 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 21:17:54 -0800 Subject: [PATCH 33/61] change to 0.71 version --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 6b6b0b911..7f38d1e75 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "0.71 beta" + VERSION = "0.71" ) From 4d3353750e78e2b98f61e0543fabf57b4a4f7daf Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 21:28:28 -0800 Subject: [PATCH 34/61] fix build issue --- weed/images/orientation_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/images/orientation_test.go b/weed/images/orientation_test.go index adab17ff8..32fa38f76 100644 --- a/weed/images/orientation_test.go +++ b/weed/images/orientation_test.go @@ -2,6 +2,7 @@ package images import ( "io/ioutil" + "os" "testing" ) @@ -14,4 +15,6 @@ func TestXYZ(t *testing.T) { ioutil.WriteFile("fixed1.jpg", fixed_data, 0644) + os.Remove("fixed1.jpg") + } From 
dc42d153525afe1e291d1bd7f8de5e8ca2b3c31e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 21:32:59 -0800 Subject: [PATCH 35/61] release 0.72 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 7f38d1e75..4912e546c 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -1,5 +1,5 @@ package util const ( - VERSION = "0.71" + VERSION = "0.72" ) From d1562e48e1bfe2eb9d8e852d0bbb8d6f1f44f024 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:08:53 -0800 Subject: [PATCH 36/61] default to release --- .travis.yml | 9 +++++++++ Makefile | 49 +++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index c72eb8fd1..d6706a3e5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,5 +26,14 @@ deploy: - build/darwin_amd64.tar.gz - build/windows_386.zip - build/windows_amd64.zip + - build/freebsd_arm.tar.gz + - build/freebsd_amd64.tar.gz + - build/freebsd_386.tar.gz + - build/netbsd_arm.tar.gz + - build/netbsd_amd64.tar.gz + - build/netbsd_386.tar.gz + - build/openbsd_arm.tar.gz + - build/openbsd_amd64.tar.gz + - build/openbsd_386.tar.gz on: repo: chrislusf/seaweedfs diff --git a/Makefile b/Makefile index 0ed98b9ec..f3abbc79c 100644 --- a/Makefile +++ b/Makefile @@ -13,9 +13,9 @@ tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) -all: build +all: release -.PHONY : clean deps build linux release windows_build darwin_build linux_build clean +.PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean clean: go clean -i $(GO_FLAGS) $(SOURCE_DIR) @@ -32,7 +32,7 @@ linux: deps mkdir -p linux GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) -release: windows_build darwin_build linux_build +release: windows_build darwin_build linux_build bsd_build ##### LINUX BUILDS ##### linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz @@ -69,4 +69,45 @@ build/windows_386.zip: $(sources) build/windows_amd64.zip: $(sources) $(call build,windows,amd64,.exe) - $(call zip,windows,amd64,.exe) \ No newline at end of file + $(call zip,windows,amd64,.exe) + +##### BSD BUILDS ##### +bsd_build: build/freebsd_arm.tar.gz build/freebsd_386.tar.gz build/freebsd_amd64.tar.gz \ + build/netbsd_arm.tar.gz build/netbsd_386.tar.gz build/netbsd_amd64.tar.gz \ + build/openbsd_arm.tar.gz build/openbsd_386.tar.gz build/openbsd_amd64.tar.gz + +build/freebsd_386.tar.gz: $(sources) + $(call build,freebsd,386,) + $(call tar,freebsd,386) + +build/freebsd_amd64.tar.gz: $(sources) + $(call build,freebsd,amd64,) + $(call tar,freebsd,amd64) + +build/freebsd_arm.tar.gz: $(sources) + $(call build,freebsd,arm,) + $(call tar,freebsd,arm) + +build/netbsd_386.tar.gz: $(sources) + $(call build,netbsd,386,) + $(call tar,netbsd,386) + +build/netbsd_amd64.tar.gz: $(sources) + $(call build,netbsd,amd64,) + $(call tar,netbsd,amd64) + +build/netbsd_arm.tar.gz: $(sources) + $(call build,netbsd,arm,) + $(call tar,netbsd,arm) + +build/openbsd_386.tar.gz: $(sources) + $(call build,openbsd,386,) + $(call tar,openbsd,386) + +build/openbsd_amd64.tar.gz: $(sources) + $(call build,openbsd,amd64,) + $(call tar,openbsd,amd64) + +build/openbsd_arm.tar.gz: $(sources) + $(call build,openbsd,arm,) + $(call tar,openbsd,arm) From 
9cb034d49efc3759217b0858f3f85f46f0c49d1d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:11:33 -0800 Subject: [PATCH 37/61] adjust makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f3abbc79c..947a9a75f 100644 --- a/Makefile +++ b/Makefile @@ -32,7 +32,7 @@ linux: deps mkdir -p linux GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) -release: windows_build darwin_build linux_build bsd_build +release: deps windows_build darwin_build linux_build bsd_build ##### LINUX BUILDS ##### linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz From 19159ec479c40e79425e15fb671a5bb551e1c81e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:20:15 -0800 Subject: [PATCH 38/61] adjust makefile and travis --- .travis.yml | 1 + Makefile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d6706a3e5..772263077 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,7 @@ install: script: - go test ./weed/... +- make release deploy: provider: releases diff --git a/Makefile b/Makefile index 947a9a75f..aabd7b448 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) -all: release +all: build .PHONY : clean deps build linux release windows_build darwin_build linux_build bsd_build clean From 83ce8c30fcdf4029b9760660c01a2ea362119a55 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:36:39 -0800 Subject: [PATCH 39/61] skip cleanup on travis deployment --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 772263077..69b3a70ee 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,7 @@ script: deploy: provider: releases + skip_cleanup: true api_key: secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI= file: @@ -37,4 +38,5 @@ deploy: - build/openbsd_amd64.tar.gz - build/openbsd_386.tar.gz on: - repo: chrislusf/seaweedfs + branch: release + repo: chrislusf/seaweedfs From f9c20aff9dea09fe2feeb73535364bc9282d06a9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:38:08 -0800 Subject: [PATCH 40/61] deploy on tags --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 69b3a70ee..604cf7f50 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,5 +38,5 @@ deploy: - build/openbsd_amd64.tar.gz - build/openbsd_386.tar.gz on: - branch: release + tags: true repo: chrislusf/seaweedfs From 0d8cf87a8c86e12953d4f6b3898c4f4a4d20ee05 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 3 Jan 2017 22:40:04 -0800 Subject: [PATCH 41/61] remove tab --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 604cf7f50..b5021298b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,4 +39,4 @@ deploy: - build/openbsd_386.tar.gz on: tags: true - repo: chrislusf/seaweedfs + repo: chrislusf/seaweedfs From 47b6ce93f833cec35ba5d7681ba0d110d99530fc Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Jan 2017 00:29:58 -0800 Subject: [PATCH 42/61] download from github release page --- docker/Dockerfile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff 
--git a/docker/Dockerfile b/docker/Dockerfile index 21e5a7b47..0f64687d2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -3,9 +3,10 @@ FROM progrium/busybox RUN opkg-install curl RUN echo tlsv1 >> ~/.curlrc -RUN curl -Lks https://bintray.com$(curl -Lk http://bintray.com/chrislusf/seaweedfs/seaweedfs/_latestVersion | grep linux_amd64.tar.gz | sed -n "/href/ s/.*href=['\"]\([^'\"]*\)['\"].*/\1/gp") | gunzip | tar -xf - && \ - mv go_*amd64/weed /usr/bin/ && \ - rm -r go_*amd64 +RUN curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o '/chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz' | wget --base=http://github.com/ -i - && \ + tar xzvf linux_amd64.tar.gz && \ + mv weed /usr/bin/ && \ + rm -r linux_amd64.tar.gz EXPOSE 8080 EXPOSE 9333 From e2100d06269ffd32a3a64e0fee7719c99a9abe12 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Jan 2017 00:33:22 -0800 Subject: [PATCH 43/61] move links from bintray to github release page --- README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 41b8f6be3..d0ef9486e 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs) [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed) [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki) -[![](https://api.bintray.com/packages/chrislusf/seaweedfs/seaweedfs/images/download.png)](https://bintray.com/chrislusf/seaweedfs/seaweedfs) +[download](https://github.com/chrislusf/seaweedfs/releases/latest) ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png) @@ -24,8 +24,7 @@ There is only a 40 bytes disk storage overhead for each file's metadata. It is s SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). SeaweedFS is currently growing, with more features on the way. -Download latest compiled binaries for different platforms here: -[![](https://api.bintray.com/packages/chrislusf/seaweedfs/seaweedfs/images/download.png)](https://bintray.com/chrislusf/seaweedfs/seaweedfs) +[Download latest compiled binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) [SeaweedFS Discussion Group](https://groups.google.com/d/forum/seaweedfs) From 4d1ac7332e8ecb50e34bdc255fc7775405417eae Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Jan 2017 01:59:26 -0800 Subject: [PATCH 44/61] better organized travis --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b5021298b..ab9bab68d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,8 +13,9 @@ install: script: - go test ./weed/... 
-- make release +before_deploy: + - make release deploy: provider: releases skip_cleanup: true @@ -40,3 +41,4 @@ deploy: on: tags: true repo: chrislusf/seaweedfs + go: tip From e61c9af523ecf68877957070952d5372a02b3664 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Jan 2017 19:58:27 -0800 Subject: [PATCH 45/61] check Accept instead of content type fix https://github.com/chrislusf/seaweedfs/issues/429 --- weed/server/filer_server_handlers_read.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 5bd59b23f..e95c7fcd0 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -74,8 +74,8 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName, shouldDisplayLoadMore, } - - if strings.ToLower(r.Header.Get("Content-Type")) == "application/json" { + + if r.Header.Get("Accept") == "application/json" { writeJsonQuiet(w, r, http.StatusOK, args) } else { ui.StatusTpl.Execute(w, args) From 4f28876c8d6cd8a5c98ede1cea4f863d4ab74d09 Mon Sep 17 00:00:00 2001 From: vancepym Date: Thu, 5 Jan 2017 21:52:55 +0800 Subject: [PATCH 46/61] Update Dockerfile Replace with Alpine Linux based image --- docker/Dockerfile | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 0f64687d2..e52323c25 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,12 +1,10 @@ -FROM progrium/busybox +FROM frolvlad/alpine-glibc:alpine-3.4 -RUN opkg-install curl -RUN echo tlsv1 >> ~/.curlrc - -RUN curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o '/chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz' | wget --base=http://github.com/ -i - && \ - tar xzvf linux_amd64.tar.gz && \ - mv weed /usr/bin/ && \ - rm -r linux_amd64.tar.gz +RUN apk add --no-cache --virtual=build-dependencies --update curl wget ca-certificates && \ + wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o '/chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ + tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ + apk del curl wget ca-certificates build-dependencies && \ + rm -rf /tmp/* EXPOSE 8080 EXPOSE 9333 From 54dd925283f443b09db77717c86eea5bf0f9d740 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 5 Jan 2017 14:53:05 -0800 Subject: [PATCH 47/61] remove deprecated instructions --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index d0ef9486e..d25b79247 100644 --- a/README.md +++ b/README.md @@ -170,8 +170,6 @@ Volume servers can start with a specific data center name: weed volume -dir=/tmp/2 -port=8081 -dataCenter=dc2 ``` -Or the master server can determine the data center via volume server's IP address and settings in weed.conf file. - When requesting a file key, an optional "dataCenter" parameter can limit the assigned volume to the specific data center. 
For example, this specifies that the assigned volume should be limited to 'dc1': ``` From 13e7069eb9cd72f94e72acb8fbbc9dd0307da703 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 6 Jan 2017 10:22:20 -0800 Subject: [PATCH 48/61] keep track of total data file size --- unmaintained/fix_dat/fix_dat.go | 2 +- weed/command/fix.go | 2 +- weed/storage/disk_location.go | 7 ++++++- weed/storage/needle.go | 2 ++ weed/storage/needle_map.go | 2 +- weed/storage/needle_map_boltdb.go | 6 +++--- weed/storage/needle_map_leveldb.go | 6 +++--- weed/storage/needle_map_memory.go | 6 +++--- weed/storage/needle_read_write.go | 14 +++++++++----- weed/storage/volume.go | 1 + weed/storage/volume_checking.go | 22 ++++++++++++++-------- weed/storage/volume_loading.go | 3 ++- weed/storage/volume_read_write.go | 30 +++++++++++++++++++++++------- weed/storage/volume_super_block.go | 1 + weed/storage/volume_sync.go | 2 +- weed/storage/volume_vacuum.go | 10 +++++----- 16 files changed, 76 insertions(+), 40 deletions(-) diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index bcb985fe9..1f95e6cd6 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -60,7 +60,7 @@ func main() { iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) { fmt.Printf("file id=%d name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) - s, e := n.Append(newDatFile, storage.Version2) + s, _, e := n.Append(newDatFile, storage.Version2) fmt.Printf("size %d error %v\n", s, e) }) diff --git a/weed/command/fix.go b/weed/command/fix.go index 2ec74d026..22480dcd0 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -58,7 +58,7 @@ func runFix(cmd *Command, args []string) bool { glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return nm.Delete(n.Id) + return nm.Delete(n.Id, uint32(offset/storage.NeedlePaddingSize)) } return nil }) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index e7604a734..039b4f3b9 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -40,7 +40,12 @@ func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleM mutex.Lock() l.volumes[vid] = v mutex.Unlock() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) + glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", + l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), v.Size(), v.Ttl.String()) + if v.Size() != v.dataFileSize { + glog.V(0).Infof("data file %s, size=%d expected=%d", + l.Directory+"/"+name, v.Size(), v.dataFileSize) + } } else { glog.V(0).Infof("new volume %s error %s", name, e) } diff --git a/weed/storage/needle.go b/weed/storage/needle.go index daa050be8..1d306395e 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -3,6 +3,7 @@ package storage import ( "fmt" "io/ioutil" + "math" "mime" "net/http" "path" @@ -20,6 +21,7 @@ const ( NeedlePaddingSize = 8 NeedleChecksumSize = 4 MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 + TombstoneFileSize = math.MaxUint32 ) /* diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go index 142018946..15a0387c5 100644 --- a/weed/storage/needle_map.go +++ b/weed/storage/needle_map.go @@ -24,7 +24,7 @@ const ( type NeedleMapper interface { Put(key uint64, offset uint32, size uint32) error Get(key uint64) (element *NeedleValue, ok bool) - 
Delete(key uint64) error + Delete(key uint64, offset uint32) error Close() Destroy() error ContentSize() uint64 diff --git a/weed/storage/needle_map_boltdb.go b/weed/storage/needle_map_boltdb.go index bd3edf28d..e131ea822 100644 --- a/weed/storage/needle_map_boltdb.go +++ b/weed/storage/needle_map_boltdb.go @@ -63,7 +63,7 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error { } defer db.Close() return WalkIndexFile(indexFile, func(key uint64, offset, size uint32) error { - if offset > 0 { + if offset > 0 && size != TombstoneFileSize { boltDbWrite(db, key, offset, size) } else { boltDbDelete(db, key) @@ -143,12 +143,12 @@ func boltDbDelete(db *bolt.DB, key uint64) error { }) } -func (m *BoltDbNeedleMap) Delete(key uint64) error { +func (m *BoltDbNeedleMap) Delete(key uint64, offset uint32) error { if oldNeedle, ok := m.Get(key); ok { m.logDelete(oldNeedle.Size) } // write to index file first - if err := m.appendToIndexFile(key, 0, 0); err != nil { + if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { return err } return boltDbDelete(m.db, key) diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index 1789dbb12..f025ea360 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -61,7 +61,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { } defer db.Close() return WalkIndexFile(indexFile, func(key uint64, offset, size uint32) error { - if offset > 0 { + if offset > 0 && size != TombstoneFileSize { levelDbWrite(db, key, offset, size) } else { levelDbDelete(db, key) @@ -112,12 +112,12 @@ func levelDbDelete(db *leveldb.DB, key uint64) error { return db.Delete(bytes, nil) } -func (m *LevelDbNeedleMap) Delete(key uint64) error { +func (m *LevelDbNeedleMap) Delete(key uint64, offset uint32) error { if oldNeedle, ok := m.Get(key); ok { m.logDelete(oldNeedle.Size) } // write to index file first - if err := m.appendToIndexFile(key, 0, 0); err != nil { + if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { return err } return levelDbDelete(m.db, key) diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index 195d8bdbc..6fa929d90 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -33,7 +33,7 @@ func LoadNeedleMap(file *os.File) (*NeedleMap, error) { } nm.FileCounter++ nm.FileByteCounter = nm.FileByteCounter + uint64(size) - if offset > 0 { + if offset > 0 && size != TombstoneFileSize { oldSize := nm.m.Set(Key(key), offset, size) glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize) if oldSize > 0 { @@ -92,10 +92,10 @@ func (nm *NeedleMap) Get(key uint64) (element *NeedleValue, ok bool) { element, ok = nm.m.Get(Key(key)) return } -func (nm *NeedleMap) Delete(key uint64) error { +func (nm *NeedleMap) Delete(key uint64, offset uint32) error { deletedBytes := nm.m.Delete(Key(key)) nm.logDelete(deletedBytes) - return nm.appendToIndexFile(key, 0, 0) + return nm.appendToIndexFile(key, offset, TombstoneFileSize) } func (nm *NeedleMap) Close() { _ = nm.indexFile.Close() diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go index 3ac236951..8baa325df 100644 --- a/weed/storage/needle_read_write.go +++ b/weed/storage/needle_read_write.go @@ -22,10 +22,10 @@ const ( ) func (n *Needle) DiskSize() int64 { - padding := NeedlePaddingSize - ((NeedleHeaderSize + int64(n.Size) + NeedleChecksumSize) % 
NeedlePaddingSize) - return NeedleHeaderSize + int64(n.Size) + padding + NeedleChecksumSize + return getActualSize(n.Size) } -func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) { + +func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize int64, err error) { if s, ok := w.(io.Seeker); ok { if end, e := s.Seek(0, 1); e == nil { defer func(s io.Seeker, off int64) { @@ -54,6 +54,7 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) { if _, err = w.Write(n.Data); err != nil { return } + actualSize = NeedleHeaderSize + int64(n.Size) padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize) util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value()) _, err = w.Write(header[0 : NeedleChecksumSize+padding]) @@ -131,9 +132,12 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, err error) { padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize) util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value()) _, err = w.Write(header[0 : NeedleChecksumSize+padding]) - return n.DataSize, err + + actualSize = NeedleHeaderSize + int64(n.Size) + NeedleChecksumSize + int64(padding) + + return n.DataSize, actualSize, err } - return 0, fmt.Errorf("Unsupported Version! (%d)", version) + return 0, 0, fmt.Errorf("Unsupported Version! (%d)", version) } func ReadNeedleBlob(r *os.File, offset int64, size uint32) (dataSlice []byte, block *Block, err error) { diff --git a/weed/storage/volume.go b/weed/storage/volume.go index dfd623eaa..11ee600df 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -15,6 +15,7 @@ type Volume struct { dir string Collection string dataFile *os.File + dataFileSize int64 nm NeedleMapper needleMapKind NeedleMapType readOnly bool diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index 48f707594..6d4011f27 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -7,27 +7,33 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error { +func getActualSize(size uint32) int64 { + padding := NeedlePaddingSize - ((NeedleHeaderSize + size + NeedleChecksumSize) % NeedlePaddingSize) + return NeedleHeaderSize + int64(size) + NeedleChecksumSize + int64(padding) +} + +func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) (int64, error) { var indexSize int64 var e error if indexSize, e = verifyIndexFileIntegrity(indexFile); e != nil { - return fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e) + return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), e) } if indexSize == 0 { - return nil + return int64(SuperBlockSize), nil } var lastIdxEntry []byte if lastIdxEntry, e = readIndexEntryAtOffset(indexFile, indexSize-NeedleIndexSize); e != nil { - return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e) + return 0, fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e) } key, offset, size := idxFileEntry(lastIdxEntry) - if offset == 0 { - return nil + if offset == 0 || size == TombstoneFileSize { + return 0, nil } if e = verifyNeedleIntegrity(v.dataFile, v.Version(), int64(offset)*NeedlePaddingSize, key, size); e != nil { - return fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e) + return 0, fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e) } - 
return nil + + return int64(offset)*int64(NeedlePaddingSize) + getActualSize(size), nil } func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) { diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index f2099de83..7bc65a4a3 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -64,7 +64,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind return fmt.Errorf("cannot write Volume Index %s.idx: %v", fileName, e) } } - if e = CheckVolumeDataIntegrity(v, indexFile); e != nil { + if v.dataFileSize, e = CheckVolumeDataIntegrity(v, indexFile); e != nil { v.readOnly = true glog.V(0).Infof("volumeDataIntegrityChecking failed %v", e) } @@ -86,6 +86,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind } } } + return e } diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 7458b4879..66f18557f 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -60,6 +60,8 @@ func (v *Volume) AppendBlob(b []byte) (offset int64, err error) { if offset, err = v.dataFile.Seek(0, 2); err != nil { glog.V(0).Infof("failed to seek the end of file: %v", err) return + } else if offset != int64(v.dataFileSize) { + glog.V(0).Infof("dataFileSize %d != actual data file size: %d", v.dataFileSize, offset) } //ensure file writing starting from aligned positions if offset%NeedlePaddingSize != 0 { @@ -67,9 +69,12 @@ func (v *Volume) AppendBlob(b []byte) (offset int64, err error) { if offset, err = v.dataFile.Seek(offset, 0); err != nil { glog.V(0).Infof("failed to align in datafile %s: %v", v.dataFile.Name(), err) return + } else if offset != int64(v.dataFileSize) { + glog.V(0).Infof("dataFileSize %d != actual data file size: %d", v.dataFileSize, offset) } } - v.dataFile.Write(b) + _, err = v.dataFile.Write(b) + v.dataFileSize += int64(len(b)) return } @@ -86,10 +91,12 @@ func (v *Volume) writeNeedle(n *Needle) (size uint32, err error) { glog.V(4).Infof("needle is unchanged!") return } - var offset int64 + var offset, actualSize int64 if offset, err = v.dataFile.Seek(0, 2); err != nil { glog.V(0).Infof("failed to seek the end of file: %v", err) return + } else if offset != int64(v.dataFileSize) { + glog.V(0).Infof("dataFileSize %d != actual data file size: %d", v.dataFileSize, offset) } //ensure file writing starting from aligned positions @@ -101,12 +108,14 @@ func (v *Volume) writeNeedle(n *Needle) (size uint32, err error) { } } - if size, err = n.Append(v.dataFile, v.Version()); err != nil { + if size, actualSize, err = n.Append(v.dataFile, v.Version()); err != nil { if e := v.dataFile.Truncate(offset); e != nil { err = fmt.Errorf("%s\ncannot truncate %s: %v", err, v.dataFile.Name(), e) } return } + v.dataFileSize += actualSize + nv, ok := v.nm.Get(n.Id) if !ok || int64(nv.Offset)*NeedlePaddingSize < offset { if err = v.nm.Put(n.Id, uint32(offset/NeedlePaddingSize), n.Size); err != nil { @@ -128,16 +137,20 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) { defer v.dataFileAccessLock.Unlock() nv, ok := v.nm.Get(n.Id) //fmt.Println("key", n.Id, "volume offset", nv.Offset, "data_size", n.Size, "cached size", nv.Size) - if ok { + if ok && nv.Size != TombstoneFileSize { size := nv.Size - if err := v.nm.Delete(n.Id); err != nil { + // println("adding tombstone", n.Id, "at offset", v.dataFileSize) + if err := v.nm.Delete(n.Id, uint32(v.dataFileSize/NeedlePaddingSize)); err != nil { return size, err } - if _, err 
:= v.dataFile.Seek(0, 2); err != nil { + if offset, err := v.dataFile.Seek(0, 2); err != nil { return size, err + } else if offset != int64(v.dataFileSize) { + glog.V(0).Infof("dataFileSize %d != actual data file size: %d, deleteMarker: %d", v.dataFileSize, offset, getActualSize(0)) } n.Data = nil - _, err := n.Append(v.dataFile, v.Version()) + _, actualSize, err := n.Append(v.dataFile, v.Version()) + v.dataFileSize += actualSize return size, err } return 0, nil @@ -149,6 +162,9 @@ func (v *Volume) readNeedle(n *Needle) (int, error) { if !ok || nv.Offset == 0 { return -1, errors.New("Not Found") } + if nv.Size == TombstoneFileSize { + return -1, errors.New("Already Deleted") + } err := n.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version()) if err != nil { return 0, err diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index fc773273d..ae6ee7c25 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -56,6 +56,7 @@ func (v *Volume) maybeWriteSuperBlock() error { } } } + v.dataFileSize = SuperBlockSize } return e } diff --git a/weed/storage/volume_sync.go b/weed/storage/volume_sync.go index 7448b856f..23d8db510 100644 --- a/weed/storage/volume_sync.go +++ b/weed/storage/volume_sync.go @@ -148,7 +148,7 @@ func fetchVolumeFileEntries(volumeServer string, vid VolumeId) (m CompactMap, la total := 0 err = operation.GetVolumeIdxEntries(volumeServer, vid.String(), func(key uint64, offset, size uint32) { // println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size) - if offset != 0 && size != 0 { + if offset > 0 && size != TombstoneFileSize { m.Set(Key(key), offset, size) } else { m.Delete(Key(key)) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 723300557..f3ded5ff2 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -35,7 +35,7 @@ func (v *Volume) Compact2() error { } func (v *Volume) commitCompact() error { - glog.V(3).Infof("Committing vacuuming...") + glog.V(0).Infof("Committing vacuuming...") v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() glog.V(3).Infof("Got Committing lock...") @@ -189,7 +189,7 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI fakeDelNeedle := new(Needle) fakeDelNeedle.Id = key fakeDelNeedle.Cookie = 0x12345678 - _, err = fakeDelNeedle.Append(dst, v.Version()) + _, _, err = fakeDelNeedle.Append(dst, v.Version()) if err != nil { return } @@ -241,7 +241,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err erro if err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } - if _, err = n.Append(dst, v.Version()); err != nil { + if _, _, err := n.Append(dst, v.Version()); err != nil { return fmt.Errorf("cannot append needle: %s", err) } new_offset += n.DiskSize() @@ -280,7 +280,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { new_offset := int64(SuperBlockSize) WalkIndexFile(oldIndexFile, func(key uint64, offset, size uint32) error { - if size <= 0 { + if offset == 0 || size == TombstoneFileSize { return nil } @@ -302,7 +302,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) { if err = nm.Put(n.Id, uint32(new_offset/NeedlePaddingSize), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } - if _, err = n.Append(dst, v.Version()); err != nil { + if _, _, err = 
n.Append(dst, v.Version()); err != nil { return fmt.Errorf("cannot append needle: %s", err) } new_offset += n.DiskSize() From 86a7c562751fc89d52da30425f1513b4553dfa8c Mon Sep 17 00:00:00 2001 From: sparklxb Date: Sun, 8 Jan 2017 09:16:29 +0800 Subject: [PATCH 49/61] support additional header name-value pairs --- weed/operation/submit.go | 6 +-- weed/operation/upload_content.go | 28 ++++++++++--- weed/server/common.go | 4 +- weed/server/filer_server_handlers_write.go | 9 ++-- weed/server/volume_server_handlers_read.go | 13 ++++++ weed/storage/needle.go | 48 +++++++++++++++------- weed/storage/needle_read_write.go | 33 ++++++++++++++- weed/storage/store.go | 1 + weed/topology/store_replicate.go | 2 +- 9 files changed, 113 insertions(+), 31 deletions(-) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 1de6b544a..75d5afbde 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -155,7 +155,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret cm.DeleteChunks(master) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt) + ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -180,7 +180,7 @@ func upload_one_chunk(filename string, reader io.Reader, master, fileUrl, fid := "http://"+ret.Url+"/"+ret.Fid, ret.Fid glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadResult, uploadError := Upload(fileUrl, filename, reader, false, - "application/octet-stream", jwt) + "application/octet-stream", nil, jwt) if uploadError != nil { return fid, 0, uploadError } @@ -198,6 +198,6 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", jwt) + _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index a87784cad..b5784322a 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -36,13 +36,13 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, jwt security.EncodedJwt) (*UploadResult, error) { +func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairs []byte, jwt security.EncodedJwt) (*UploadResult, error) { return upload_content(uploadUrl, func(w io.Writer) (err error) { _, err = io.Copy(w, reader) return - }, filename, isGzipped, mtype, jwt) + }, filename, isGzipped, mtype, pairs, jwt) } -func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, jwt security.EncodedJwt) (*UploadResult, error) { +func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairs []byte, jwt security.EncodedJwt) (*UploadResult, error) { body_buf := bytes.NewBufferString("") body_writer := multipart.NewWriter(body_buf) h := make(textproto.MIMEHeader) @@ -59,6 +59,14 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if jwt != "" { h.Set("Authorization", "BEARER "+string(jwt)) } + pairMap := make(map[string]string) + if len(pairs) != 0 { + err := json.Unmarshal(pairs, &pairMap) + if 
err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + } + file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { glog.V(0).Infoln("error creating form file", cp_err.Error()) @@ -73,7 +81,17 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error glog.V(0).Infoln("error closing body", err) return nil, err } - resp, post_err := client.Post(uploadUrl, content_type, body_buf) + + req, postErr := http.NewRequest("POST", uploadUrl, body_buf) + if postErr != nil { + glog.V(0).Infoln("failing to upload to", uploadUrl, postErr.Error()) + return nil, postErr + } + req.Header.Set("Content-Type", content_type) + for k, v := range pairMap { + req.Header.Set(k, v) + } + resp, post_err := client.Do(req) if post_err != nil { glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) return nil, post_err @@ -86,7 +104,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error var ret UploadResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.V(0).Infoln("failing to read upload resonse", uploadUrl, string(resp_body)) + glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body)) return nil, unmarshal_err } if ret.Error != "" { diff --git a/weed/server/common.go b/weed/server/common.go index dcd31f823..3c9e3014f 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -86,7 +86,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r) + fname, data, mimeType, pairs, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return @@ -112,7 +112,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, jwt) + uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairs, jwt) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 464cb81ef..aed393dcd 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -15,13 +15,14 @@ import ( "net/url" "strings" + "path" + "strconv" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "path" - "strconv" ) type FilerPostResult struct { @@ -112,7 +113,7 @@ func (fs *FilerServer) multipartUploadAnalyzer(w http.ResponseWriter, r *http.Re if r.Method == "PUT" { buf, _ := ioutil.ReadAll(r.Body) r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) - fileName, _, _, _, _, _, _, pe := storage.ParseUpload(r) + fileName, _, _, _, _, _, _, _, pe := storage.ParseUpload(r) if pe != nil { glog.V(0).Infoln("failing to parse post body", pe.Error()) writeJsonError(w, r, http.StatusInternalServerError, pe) @@ -521,7 +522,7 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht err = nil ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, 
fs.jwt(fileId)) + uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId)) if uploadResult != nil { glog.V(0).Infoln("Chunk upload result. Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 6944e79e0..2a273c595 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -12,6 +12,8 @@ import ( "strings" "time" + "encoding/json" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" @@ -94,6 +96,17 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } w.Header().Set("Etag", etag) + if n.HasPairs() { + pairMap := make(map[string]string) + err = json.Unmarshal(n.Pairs, &pairMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + for k, v := range pairMap { + w.Header().Set(k, v) + } + } + if vs.tryHandleChunkedFile(n, filename, w, r) { return } diff --git a/weed/storage/needle.go b/weed/storage/needle.go index 1d306395e..ea013e290 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -11,6 +11,8 @@ import ( "strings" "time" + "encoding/json" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" @@ -22,6 +24,7 @@ const ( NeedleChecksumSize = 4 MaxPossibleVolumeSize = 4 * 1024 * 1024 * 1024 * 8 TombstoneFileSize = math.MaxUint32 + PairNamePrefix = "Seaweed-" ) /* @@ -40,6 +43,8 @@ type Needle struct { Name []byte `comment:"maximum 256 characters"` //version2 MimeSize uint8 //version2 Mime []byte `comment:"maximum 256 characters"` //version2 + PairsSize uint16 //version2 + Pairs []byte `comment:"additional name value pairs, json format, maximum 64kB"` LastModified uint64 //only store LastModifiedBytesLength bytes, which is 5 bytes to disk Ttl *TTL @@ -55,8 +60,17 @@ func (n *Needle) String() (str string) { } func ParseUpload(r *http.Request) ( - fileName string, data []byte, mimeType string, isGzipped bool, + fileName string, data []byte, mimeType string, pairs []byte, isGzipped bool, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { + pairMap := make(map[string]string) + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { + pairMap[k] = v[0] + } + } + if len(pairMap) != 0 { + pairs, _ = json.Marshal(pairMap) + } form, fe := r.MultipartReader() if fe != nil { glog.V(0).Infoln("MultipartReader [ERROR]", fe) @@ -109,19 +123,19 @@ func ParseUpload(r *http.Request) ( } isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) - isGzipped = false + dotIndex := strings.LastIndex(fileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(fileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && mtype != contentType { + mimeType = contentType //only return mime type if not deductable + mtype = contentType + } + if !isChunkedFile { - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } if 
part.Header.Get("Content-Encoding") == "gzip" { isGzipped = true } else if operation.IsGzippable(ext, mtype) { @@ -144,9 +158,10 @@ func ParseUpload(r *http.Request) ( return } func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) { + var pair []byte fname, mimeType, isGzipped, isChunkedFile := "", "", false, false n = new(Needle) - fname, n.Data, mimeType, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + fname, n.Data, mimeType, pair, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) if e != nil { return } @@ -158,6 +173,11 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) { n.Mime = []byte(mimeType) n.SetHasMime() } + if len(pair) < 65536 { + n.Pairs = pair + n.PairsSize = uint16(len(pair)) + n.SetHasPairs() + } if isGzipped { n.SetGzipped() } diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go index 8baa325df..ff43effb3 100644 --- a/weed/storage/needle_read_write.go +++ b/weed/storage/needle_read_write.go @@ -16,6 +16,7 @@ const ( FlagHasMime = 0x04 FlagHasLastModifiedDate = 0x08 FlagHasTtl = 0x10 + FlagHasPairs = 0x20 FlagIsChunkManifest = 0x80 LastModifiedBytesLength = 5 TtlBytesLength = 2 @@ -78,6 +79,9 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i if n.HasTtl() { n.Size = n.Size + TtlBytesLength } + if n.HasPairs() { + n.Size += 2 + uint32(n.PairsSize) + } } else { n.Size = 0 } @@ -128,6 +132,15 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i return } } + if n.HasPairs() { + util.Uint16toBytes(header[0:2], n.PairsSize) + if _, err = w.Write(header[0:2]); err != nil { + return + } + if _, err = w.Write(n.Pairs); err != nil { + return + } + } } padding := NeedlePaddingSize - ((NeedleHeaderSize + n.Size + NeedleChecksumSize) % NeedlePaddingSize) util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value()) @@ -141,8 +154,9 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i } func ReadNeedleBlob(r *os.File, offset int64, size uint32) (dataSlice []byte, block *Block, err error) { - padding := NeedlePaddingSize - ((NeedleHeaderSize + size + NeedleChecksumSize) % NeedlePaddingSize) - readSize := NeedleHeaderSize + size + NeedleChecksumSize + padding + NeedleWithoutPaddingSize := NeedleHeaderSize + size + NeedleChecksumSize + padding := NeedlePaddingSize - (NeedleWithoutPaddingSize % NeedlePaddingSize) + readSize := NeedleWithoutPaddingSize + padding return getBytesForFileBlock(r, offset, int(readSize)) } @@ -213,6 +227,13 @@ func (n *Needle) readNeedleDataVersion2(bytes []byte) { n.Ttl = LoadTTLFromBytes(bytes[index : index+TtlBytesLength]) index = index + TtlBytesLength } + if index < lenBytes && n.HasPairs() { + n.PairsSize = util.BytesToUint16(bytes[index : index+2]) + index += 2 + end := index + int(n.PairsSize) + n.Pairs = bytes[index:end] + index = end + } } func ReadNeedleHeader(r *os.File, version Version, offset int64) (n *Needle, bodyLength uint32, err error) { @@ -296,3 +317,11 @@ func (n *Needle) IsChunkedManifest() bool { func (n *Needle) SetIsChunkManifest() { n.Flags = n.Flags | FlagIsChunkManifest } + +func (n *Needle) HasPairs() bool { + return n.Flags&FlagHasPairs != 0 +} + +func (n *Needle) SetHasPairs() { + n.Flags = n.Flags | FlagHasPairs +} diff --git a/weed/storage/store.go b/weed/storage/store.go index 614c87ace..37a3904bd 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -303,6 +303,7 @@ func (s *Store) Write(i 
VolumeId, n *Needle) (size uint32, err error) { err = fmt.Errorf("Volume %d is read only", i) return } + // TODO: count needle size ahead if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) { size, err = v.writeNeedle(n) } else { diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index be5777167..e76771140 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -57,7 +57,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, u.RawQuery = q.Encode() _, err := operation.Upload(u.String(), string(needle.Name), bytes.NewReader(needle.Data), needle.IsGzipped(), string(needle.Mime), - jwt) + needle.Pairs, jwt) return err }); err != nil { ret = 0 From da9b672d1bce089c7a74e6b4bb68bb68cc4097f2 Mon Sep 17 00:00:00 2001 From: sparklxb Date: Sun, 8 Jan 2017 22:34:26 +0800 Subject: [PATCH 50/61] support additional header name-value pairs --- weed/operation/upload_content.go | 13 +++------ weed/server/common.go | 4 +-- weed/server/filer_server_handlers_write.go | 3 +-- weed/storage/needle.go | 31 +++++++++++++--------- weed/topology/store_replicate.go | 15 ++++++++--- 5 files changed, 36 insertions(+), 30 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index b5784322a..30c7f1ea3 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -36,13 +36,13 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairs []byte, jwt security.EncodedJwt) (*UploadResult, error) { +func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { return upload_content(uploadUrl, func(w io.Writer) (err error) { _, err = io.Copy(w, reader) return - }, filename, isGzipped, mtype, pairs, jwt) + }, filename, isGzipped, mtype, pairMap, jwt) } -func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairs []byte, jwt security.EncodedJwt) (*UploadResult, error) { +func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { body_buf := bytes.NewBufferString("") body_writer := multipart.NewWriter(body_buf) h := make(textproto.MIMEHeader) @@ -59,13 +59,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if jwt != "" { h.Set("Authorization", "BEARER "+string(jwt)) } - pairMap := make(map[string]string) - if len(pairs) != 0 { - err := json.Unmarshal(pairs, &pairMap) - if err != nil { - glog.V(0).Infoln("Unmarshal pairs error:", err) - } - } file_writer, cp_err := body_writer.CreatePart(h) if cp_err != nil { diff --git a/weed/server/common.go b/weed/server/common.go index 3c9e3014f..c5956143f 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -86,7 +86,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairs, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r) + fname, data, mimeType, pairMap, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return @@ -112,7 +112,7 @@ func submitForClientHandler(w http.ResponseWriter, 
r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairs, jwt) + uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, jwt) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index aed393dcd..80f222ce6 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -13,10 +13,9 @@ import ( "net/http" "net/textproto" "net/url" - "strings" - "path" "strconv" + "strings" "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" diff --git a/weed/storage/needle.go b/weed/storage/needle.go index ea013e290..5bafe2f62 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -1,6 +1,7 @@ package storage import ( + "encoding/json" "fmt" "io/ioutil" "math" @@ -11,8 +12,6 @@ import ( "strings" "time" - "encoding/json" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" @@ -60,17 +59,15 @@ func (n *Needle) String() (str string) { } func ParseUpload(r *http.Request) ( - fileName string, data []byte, mimeType string, pairs []byte, isGzipped bool, + fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { - pairMap := make(map[string]string) + pairMap = make(map[string]string) for k, v := range r.Header { if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { pairMap[k] = v[0] } } - if len(pairMap) != 0 { - pairs, _ = json.Marshal(pairMap) - } + form, fe := r.MultipartReader() if fe != nil { glog.V(0).Infoln("MultipartReader [ERROR]", fe) @@ -158,10 +155,10 @@ func ParseUpload(r *http.Request) ( return } func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) { - var pair []byte + var pairMap map[string]string fname, mimeType, isGzipped, isChunkedFile := "", "", false, false n = new(Needle) - fname, n.Data, mimeType, pair, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + fname, n.Data, mimeType, pairMap, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) if e != nil { return } @@ -173,10 +170,18 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) { n.Mime = []byte(mimeType) n.SetHasMime() } - if len(pair) < 65536 { - n.Pairs = pair - n.PairsSize = uint16(len(pair)) - n.SetHasPairs() + if len(pairMap) != 0 { + trimmedPairMap := make(map[string]string) + for k, v := range pairMap { + trimmedPairMap[k[len(PairNamePrefix):]] = v + } + + pairs, _ := json.Marshal(trimmedPairMap) + if len(pairs) < 65536 { + n.Pairs = pairs + n.PairsSize = uint16(len(pairs)) + n.SetHasPairs() + } } if isGzipped { n.SetGzipped() diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index e76771140..aa312ac03 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -2,14 +2,14 @@ package topology import ( "bytes" + "encoding/json" "errors" "fmt" "net/http" + "net/url" "strconv" "strings" - "net/url" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" @@ -55,9 +55,18 @@ func ReplicatedWrite(masterNode string, s *storage.Store, q.Set("cm", "true") } 
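	// The needle carries the client's original Seaweed-* name/value pairs as a
	// JSON blob (needle.Pairs, written by NewNeedle); the lines added below
	// decode that blob back into a map so operation.Upload can re-send each
	// entry as an HTTP header on the write that is replicated to the other
	// volume servers.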
u.RawQuery = q.Encode() + + pairMap := make(map[string]string) + if needle.HasPairs() { + err := json.Unmarshal(needle.Pairs, &pairMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + } + _, err := operation.Upload(u.String(), string(needle.Name), bytes.NewReader(needle.Data), needle.IsGzipped(), string(needle.Mime), - needle.Pairs, jwt) + pairMap, jwt) return err }); err != nil { ret = 0 From 7b6837cbc2b2c27b6acdc2c41d88cd27d0823e60 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Jan 2017 10:35:47 -0800 Subject: [PATCH 51/61] move back the section --- weed/storage/needle.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/weed/storage/needle.go b/weed/storage/needle.go index 5bafe2f62..82ba2fb6a 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -120,19 +120,21 @@ func ParseUpload(r *http.Request) ( } isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } if !isChunkedFile { + + dotIndex := strings.LastIndex(fileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(fileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && mtype != contentType { + mimeType = contentType //only return mime type if not deductable + mtype = contentType + } + if part.Header.Get("Content-Encoding") == "gzip" { isGzipped = true } else if operation.IsGzippable(ext, mtype) { From ed44f12f6db7f05aba8a76f1a0ff92356c43d4b0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Jan 2017 11:01:46 -0800 Subject: [PATCH 52/61] support Fallocate on linux --- weed/command/backup.go | 2 +- weed/command/compact.go | 11 +++++----- weed/command/master.go | 4 +++- weed/command/server.go | 4 +++- weed/server/master_server.go | 8 +++++++ weed/server/master_server_handlers_admin.go | 8 +++++++ weed/server/volume_server_handlers_admin.go | 23 ++++++++++++++++++--- weed/storage/disk_location.go | 2 +- weed/storage/needle_read_write.go | 9 ++------ weed/storage/store.go | 10 ++++----- weed/storage/volume.go | 4 ++-- weed/storage/volume_create.go | 17 +++++++++++++++ weed/storage/volume_create_linux.go | 19 +++++++++++++++++ weed/storage/volume_loading.go | 6 +++--- weed/storage/volume_vacuum.go | 8 +++---- weed/topology/allocate_volume.go | 1 + weed/topology/volume_growth.go | 1 + 17 files changed, 104 insertions(+), 33 deletions(-) create mode 100644 weed/storage/volume_create.go create mode 100644 weed/storage/volume_create_linux.go diff --git a/weed/command/backup.go b/weed/command/backup.go index 0b3994027..7983f7dab 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -75,7 +75,7 @@ func runBackup(cmd *Command, args []string) bool { return true } - v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl) + v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true diff --git a/weed/command/compact.go b/weed/command/compact.go index db11880ec..ae54db115 100644 --- 
a/weed/command/compact.go +++ b/weed/command/compact.go @@ -20,10 +20,11 @@ var cmdCompact = &Command{ } var ( - compactVolumePath = cmdCompact.Flag.String("dir", ".", "data directory to store files") - compactVolumeCollection = cmdCompact.Flag.String("collection", "", "volume collection name") - compactVolumeId = cmdCompact.Flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir.") - compactMethod = cmdCompact.Flag.Int("method", 0, "option to choose which compact method. use 0 or 1.") + compactVolumePath = cmdCompact.Flag.String("dir", ".", "data directory to store files") + compactVolumeCollection = cmdCompact.Flag.String("collection", "", "volume collection name") + compactVolumeId = cmdCompact.Flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir.") + compactMethod = cmdCompact.Flag.Int("method", 0, "option to choose which compact method. use 0 or 1.") + compactVolumePreallocate = cmdCompact.Flag.Int64("preallocate", 0, "preallocate volume disk space") ) func runCompact(cmd *Command, args []string) bool { @@ -34,7 +35,7 @@ func runCompact(cmd *Command, args []string) bool { vid := storage.VolumeId(*compactVolumeId) v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid, - storage.NeedleMapInMemory, nil, nil) + storage.NeedleMapInMemory, nil, nil, *compactVolumePreallocate) if err != nil { glog.Fatalf("Load Volume [ERROR] %s\n", err) } diff --git a/weed/command/master.go b/weed/command/master.go index cd15defce..ec54fbd7b 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -35,6 +35,7 @@ var ( metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") masterPeers = cmdMaster.Flag.String("peers", "", "other master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") confFile = cmdMaster.Flag.String("conf", "/etc/weedfs/weedfs.conf", "Deprecating! 
xml configuration file") defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") @@ -73,7 +74,8 @@ func runMaster(cmd *Command, args []string) bool { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, *mport, *metaFolder, - *volumeSizeLimitMB, *mpulse, *confFile, *defaultReplicaPlacement, *garbageThreshold, + *volumeSizeLimitMB, *volumePreallocate, + *mpulse, *confFile, *defaultReplicaPlacement, *garbageThreshold, masterWhiteList, *masterSecureKey, ) diff --git a/weed/command/server.go b/weed/command/server.go index 027ba191d..87146940f 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -61,6 +61,7 @@ var ( masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") masterConfFile = cmdServer.Flag.String("master.conf", "/etc/weedfs/weedfs.conf", "xml configuration file") masterDefaultReplicaPlacement = cmdServer.Flag.String("master.defaultReplicaPlacement", "000", "Default replication type if not specified.") volumePort = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") @@ -204,7 +205,8 @@ func runServer(cmd *Command, args []string) bool { go func() { r := mux.NewRouter() ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, - *masterVolumeSizeLimitMB, *volumePulse, *masterConfFile, *masterDefaultReplicaPlacement, *serverGarbageThreshold, + *masterVolumeSizeLimitMB, *masterVolumePreallocate, + *volumePulse, *masterConfFile, *masterDefaultReplicaPlacement, *serverGarbageThreshold, serverWhiteList, *serverSecureKey, ) diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 61bda6988..9f59c2400 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -20,6 +20,7 @@ type MasterServer struct { port int metaFolder string volumeSizeLimitMB uint + preallocate int64 pulseSeconds int defaultReplicaPlacement string garbageThreshold string @@ -34,6 +35,7 @@ type MasterServer struct { func NewMasterServer(r *mux.Router, port int, metaFolder string, volumeSizeLimitMB uint, + preallocate bool, pulseSeconds int, confFile string, defaultReplicaPlacement string, @@ -41,9 +43,15 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, whiteList []string, secureKey string, ) *MasterServer { + + var preallocateSize int64 + if preallocate { + preallocateSize = int64(volumeSizeLimitMB) * (1 << 20) + } ms := &MasterServer{ port: port, volumeSizeLimitMB: volumeSizeLimitMB, + preallocate: preallocateSize, pulseSeconds: pulseSeconds, defaultReplicaPlacement: defaultReplicaPlacement, garbageThreshold: garbageThreshold, diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index a762bf416..efe81bf89 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -181,10 +181,18 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr if err != nil { return nil, err } + preallocate := ms.preallocate + if r.FormValue("preallocate") != "" { + preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 
10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse int64 preallocate = %s: %v", r.FormValue("preallocate"), err)
+		}
+	}
 	volumeGrowOption := &topology.VolumeGrowOption{
 		Collection:       r.FormValue("collection"),
 		ReplicaPlacement: replicaPlacement,
 		Ttl:              ttl,
+		Prealloacte:      preallocate,
 		DataCenter:       r.FormValue("dataCenter"),
 		Rack:             r.FormValue("rack"),
 		DataNode:         r.FormValue("dataNode"),
diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go
index ae9817ef6..28631dac7 100644
--- a/weed/server/volume_server_handlers_admin.go
+++ b/weed/server/volume_server_handlers_admin.go
@@ -3,6 +3,7 @@ package weed_server
 import (
 	"net/http"
 	"path/filepath"
+	"strconv"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/stats"
@@ -17,13 +18,29 @@ func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
 }
 
 func (vs *VolumeServer) assignVolumeHandler(w http.ResponseWriter, r *http.Request) {
-	err := vs.store.AddVolume(r.FormValue("volume"), r.FormValue("collection"), vs.needleMapKind, r.FormValue("replication"), r.FormValue("ttl"))
+	var err error
+	preallocate := int64(0)
+	if r.FormValue("preallocate") != "" {
+		preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64)
+		if err != nil {
+			glog.V(0).Infof("ignoring invalid int64 value for preallocate = %v", r.FormValue("preallocate"))
+		}
+	}
+	err = vs.store.AddVolume(
+		r.FormValue("volume"),
+		r.FormValue("collection"),
+		vs.needleMapKind,
+		r.FormValue("replication"),
+		r.FormValue("ttl"),
+		preallocate,
+	)
 	if err == nil {
 		writeJsonQuiet(w, r, http.StatusAccepted, map[string]string{"error": ""})
 	} else {
 		writeJsonError(w, r, http.StatusNotAcceptable, err)
 	}
-	glog.V(2).Infoln("assign volume =", r.FormValue("volume"), ", collection =", r.FormValue("collection"), ", replication =", r.FormValue("replication"), ", error =", err)
+	glog.V(2).Infof("assign volume = %s, collection = %s, replication = %s, error = %v",
+		r.FormValue("volume"), r.FormValue("collection"), r.FormValue("replication"), err)
 }
 
 func (vs *VolumeServer) deleteCollectionHandler(w http.ResponseWriter, r *http.Request) {
@@ -33,7 +50,7 @@ func (vs *VolumeServer) deleteCollectionHandler(w http.ResponseWriter, r *http.R
 	} else {
 		writeJsonError(w, r, http.StatusInternalServerError, err)
 	}
-	glog.V(2).Infoln("deleting collection =", r.FormValue("collection"), ", error =", err)
+	glog.V(2).Infof("deleting collection = %s, error = %v", r.FormValue("collection"), err)
 }
 
 func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) {
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index 039b4f3b9..496e0dd57 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -36,7 +36,7 @@ func (l *DiskLocation) loadExistingVolume(dir os.FileInfo, needleMapKind NeedleM
 	_, found := l.volumes[vid]
 	mutex.RUnlock()
 	if !found {
-		if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil); e == nil {
+		if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0); e == nil {
 			mutex.Lock()
 			l.volumes[vid] = v
 			mutex.Unlock()
diff --git a/weed/storage/needle_read_write.go b/weed/storage/needle_read_write.go
index ff43effb3..4f03ce396 100644
--- a/weed/storage/needle_read_write.go
+++ b/weed/storage/needle_read_write.go
@@ -146,18 +146,13 @@ func (n *Needle) Append(w io.Writer, version Version) (size uint32, actualSize i
 	util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
 	_, 
err = w.Write(header[0 : NeedleChecksumSize+padding]) - actualSize = NeedleHeaderSize + int64(n.Size) + NeedleChecksumSize + int64(padding) - - return n.DataSize, actualSize, err + return n.DataSize, getActualSize(n.Size), err } return 0, 0, fmt.Errorf("Unsupported Version! (%d)", version) } func ReadNeedleBlob(r *os.File, offset int64, size uint32) (dataSlice []byte, block *Block, err error) { - NeedleWithoutPaddingSize := NeedleHeaderSize + size + NeedleChecksumSize - padding := NeedlePaddingSize - (NeedleWithoutPaddingSize % NeedlePaddingSize) - readSize := NeedleWithoutPaddingSize + padding - return getBytesForFileBlock(r, offset, int(readSize)) + return getBytesForFileBlock(r, offset, int(getActualSize(size))) } func (n *Needle) ReadData(r *os.File, offset int64, size uint32, version Version) (err error) { diff --git a/weed/storage/store.go b/weed/storage/store.go index 37a3904bd..be2044d64 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -95,7 +95,7 @@ func NewStore(port int, ip, publicUrl string, dirnames []string, maxVolumeCounts } return } -func (s *Store) AddVolume(volumeListString string, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string) error { +func (s *Store) AddVolume(volumeListString string, collection string, needleMapKind NeedleMapType, replicaPlacement string, ttlString string, preallocate int64) error { rt, e := NewReplicaPlacementFromString(replicaPlacement) if e != nil { return e @@ -111,7 +111,7 @@ func (s *Store) AddVolume(volumeListString string, collection string, needleMapK if err != nil { return fmt.Errorf("Volume Id %s is not a valid unsigned integer!", id_string) } - e = s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl) + e = s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl, preallocate) } else { pair := strings.Split(range_string, "-") start, start_err := strconv.ParseUint(pair[0], 10, 64) @@ -123,7 +123,7 @@ func (s *Store) AddVolume(volumeListString string, collection string, needleMapK return fmt.Errorf("Volume End Id %s is not a valid unsigned integer!", pair[1]) } for id := start; id <= end; id++ { - if err := s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl); err != nil { + if err := s.addVolume(VolumeId(id), collection, needleMapKind, rt, ttl, preallocate); err != nil { e = err } } @@ -160,14 +160,14 @@ func (s *Store) findFreeLocation() (ret *DiskLocation) { } return ret } -func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL) error { +func (s *Store) addVolume(vid VolumeId, collection string, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL, preallocate int64) error { if s.findVolume(vid) != nil { return fmt.Errorf("Volume Id %d already exists!", vid) } if location := s.findFreeLocation(); location != nil { glog.V(0).Infof("In dir %s adds volume:%v collection:%s replicaPlacement:%v ttl:%v", location.Directory, vid, collection, replicaPlacement, ttl) - if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl); err == nil { + if volume, err := NewVolume(location.Directory, collection, vid, needleMapKind, replicaPlacement, ttl, preallocate); err == nil { location.SetVolume(vid, volume) return nil } else { diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 11ee600df..df9f0b7a7 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -29,11 +29,11 @@ type Volume struct { 
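	// The preallocate parameter threaded through the hunks below is a size in
	// bytes: the master derives it from -volumeSizeLimitMB when
	// -volumePreallocate is set, ships it as the "preallocate" form value of
	// /admin/assign_volume, and the volume server hands it down through
	// AddVolume -> NewVolume -> load to createVolumeFile, which calls
	// Fallocate on linux and is a no-op elsewhere.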
lastCompactRevision uint16
 }
 
-func NewVolume(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL) (v *Volume, e error) {
+func NewVolume(dirname string, collection string, id VolumeId, needleMapKind NeedleMapType, replicaPlacement *ReplicaPlacement, ttl *TTL, preallocate int64) (v *Volume, e error) {
 	v = &Volume{dir: dirname, Collection: collection, Id: id}
 	v.SuperBlock = SuperBlock{ReplicaPlacement: replicaPlacement, Ttl: ttl}
 	v.needleMapKind = needleMapKind
-	e = v.load(true, true, needleMapKind)
+	e = v.load(true, true, needleMapKind, preallocate)
 	return
 }
 func (v *Volume) String() string {
diff --git a/weed/storage/volume_create.go b/weed/storage/volume_create.go
new file mode 100644
index 000000000..6b3a17439
--- /dev/null
+++ b/weed/storage/volume_create.go
@@ -0,0 +1,17 @@
+// +build !linux
+
+package storage
+
+import (
+	"os"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func createVolumeFile(fileName string, preallocate int64) (file *os.File, e error) {
+	file, e = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
+	if preallocate > 0 {
+		glog.V(0).Infof("Preallocating disk space for %s is not supported", fileName)
+	}
+	return file, e
+}
diff --git a/weed/storage/volume_create_linux.go b/weed/storage/volume_create_linux.go
new file mode 100644
index 000000000..8f6bab2fe
--- /dev/null
+++ b/weed/storage/volume_create_linux.go
@@ -0,0 +1,25 @@
+// +build linux
+
+package storage
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+)
+
+func createVolumeFile(fileName string, preallocate int64) (file *os.File, e error) {
+	file, e = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0644)
+	if e != nil {
+		return nil, e
+	}
+	if preallocate != 0 {
+		// mode 1 = FALLOC_FL_KEEP_SIZE: reserve the blocks without growing the visible file size
+		if fe := syscall.Fallocate(int(file.Fd()), 1, 0, preallocate); fe != nil {
+			glog.V(0).Infof("Failed to preallocate %d bytes for %s: %v", preallocate, fileName, fe)
+		}
+		glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
+	}
+	return file, e
+}
diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go
index 7bc65a4a3..c4f1aae9b 100644
--- a/weed/storage/volume_loading.go
+++ b/weed/storage/volume_loading.go
@@ -12,11 +12,11 @@ func loadVolumeWithoutIndex(dirname string, collection string, id VolumeId, need
 	v = &Volume{dir: dirname, Collection: collection, Id: id}
 	v.SuperBlock = SuperBlock{}
 	v.needleMapKind = needleMapKind
-	e = v.load(false, false, needleMapKind)
+	e = v.load(false, false, needleMapKind, 0)
 	return
 }
 
-func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType) error {
+func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) error {
 	var e error
 	fileName := v.FileName()
 
@@ -34,7 +34,7 @@
 		}
 	} else {
 		if createDatIfMissing {
-			v.dataFile, e = os.OpenFile(fileName+".dat", os.O_RDWR|os.O_CREATE, 0644)
+			v.dataFile, e = createVolumeFile(fileName+".dat", preallocate)
 		} else {
 			return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName)
 		}
diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go
index f3ded5ff2..13072d1fb 100644
--- a/weed/storage/volume_vacuum.go
+++ b/weed/storage/volume_vacuum.go
@@ -24,7 +24,7 @@ func (v *Volume) Compact() error {
 	v.lastCompactIndexOffset = v.nm.IndexFileSize()
 	v.lastCompactRevision = v.SuperBlock.CompactRevision
 	glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
-	return 
v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", v.dataFileSize) } func (v *Volume) Compact2() error { @@ -66,7 +66,7 @@ func (v *Volume) commitCompact() error { //glog.V(3).Infof("Pretending to be vacuuming...") //time.Sleep(20 * time.Second) glog.V(3).Infof("Loading Commit file...") - if e = v.load(true, false, v.needleMapKind); e != nil { + if e = v.load(true, false, v.needleMapKind, 0); e != nil { return e } return nil @@ -207,11 +207,11 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI return nil } -func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string) (err error) { +func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64) (err error) { var ( dst, idx *os.File ) - if dst, err = os.OpenFile(dstName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644); err != nil { + if dst, err = createVolumeFile(dstName, preallocate); err != nil { return } defer dst.Close() diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index 7b267a805..ebf8ecbf0 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -20,6 +20,7 @@ func AllocateVolume(dn *DataNode, vid storage.VolumeId, option *VolumeGrowOption values.Add("collection", option.Collection) values.Add("replication", option.ReplicaPlacement.String()) values.Add("ttl", option.Ttl.String()) + values.Add("preallocate", fmt.Sprintf("%d", option.Prealloacte)) jsonBlob, err := util.Post("http://"+dn.Url()+"/admin/assign_volume", values) if err != nil { return err diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 3a1c9c567..ddf687419 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -21,6 +21,7 @@ type VolumeGrowOption struct { Collection string ReplicaPlacement *storage.ReplicaPlacement Ttl *storage.TTL + Prealloacte int64 DataCenter string Rack string DataNode string From 53cf1b4900630883ef38a95324cda29f50e75b8d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Jan 2017 14:57:32 -0800 Subject: [PATCH 53/61] change the basic unit to MB --- weed/command/compact.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/compact.go b/weed/command/compact.go index ae54db115..5c3038c78 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -24,7 +24,7 @@ var ( compactVolumeCollection = cmdCompact.Flag.String("collection", "", "volume collection name") compactVolumeId = cmdCompact.Flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir.") compactMethod = cmdCompact.Flag.Int("method", 0, "option to choose which compact method. 
use 0 or 1.")
-	compactVolumePreallocate = cmdCompact.Flag.Int64("preallocate", 0, "preallocate volume disk space")
+	compactVolumePreallocate = cmdCompact.Flag.Int64("preallocateMB", 0, "preallocate volume disk space")
 )
 
 func runCompact(cmd *Command, args []string) bool {
@@ -35,7 +35,7 @@ func runCompact(cmd *Command, args []string) bool {
 	vid := storage.VolumeId(*compactVolumeId)
 	v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid,
-		storage.NeedleMapInMemory, nil, nil, *compactVolumePreallocate)
+		storage.NeedleMapInMemory, nil, nil, *compactVolumePreallocate*(1<<20))
 	if err != nil {
 		glog.Fatalf("Load Volume [ERROR] %s\n", err)
 	}

From 90a6f43c5610cf9d05f043f2630fab56b3202a69 Mon Sep 17 00:00:00 2001
From: wangjie
Date: Mon, 9 Jan 2017 14:55:21 +0800
Subject: [PATCH 54/61] fix the bug that we can't get the filename when
 downloading a file.

---
 weed/util/http_util.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index 83302663e..f3e97f6f1 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -148,8 +148,9 @@ func DownloadUrl(fileUrl string) (filename string, rc io.ReadCloser, e error) {
 	}
 	contentDisposition := response.Header["Content-Disposition"]
 	if len(contentDisposition) > 0 {
-		if strings.HasPrefix(contentDisposition[0], "filename=") {
-			filename = contentDisposition[0][len("filename="):]
+		idx := strings.Index(contentDisposition[0], "filename=")
+		if idx != -1 {
+			filename = contentDisposition[0][idx+len("filename="):]
 			filename = strings.Trim(filename, "\"")
 		}
 	}

From 66e7013dfe1f14f6436c07a0ccf5aaa19ea599f7 Mon Sep 17 00:00:00 2001
From: sparklxb
Date: Mon, 9 Jan 2017 23:34:07 +0800
Subject: [PATCH 55/61] support uploading files to a specific dataCenter

---
 weed/command/filer_copy.go |  2 +-
 weed/command/upload.go     |  6 ++--
 weed/operation/submit.go   | 63 +++++++++++++++++++++++++++-----------
 3 files changed, 50 insertions(+), 21 deletions(-)

diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index 2aa994f6f..da7fb43bb 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -126,7 +126,7 @@ func doEachCopy(fileOrDir string, host string, path string) bool {
 	}
 
 	results, err := operation.SubmitFiles(*copy.master, parts,
-		*copy.replication, *copy.collection,
+		*copy.replication, *copy.collection, "",
 		*copy.ttl, *copy.maxMB, copy.secret)
 	if err != nil {
 		fmt.Printf("Failed to submit file %s: %v", fileOrDir, err)
diff --git a/weed/command/upload.go b/weed/command/upload.go
index d7a468610..72ef0af73 100644
--- a/weed/command/upload.go
+++ b/weed/command/upload.go
@@ -20,6 +20,7 @@ type UploadOptions struct {
 	include     *string
 	replication *string
 	collection  *string
+	dataCenter  *string
 	ttl         *string
 	maxMB       *int
 	secretKey   *string
@@ -33,6 +34,7 @@ func init() {
 	upload.include = cmdUpload.Flag.String("include", "", "patterns of files to upload, e.g., *.pdf, *.html, ab?d.txt, works together with -dir")
 	upload.replication = cmdUpload.Flag.String("replication", "", "replication type")
 	upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name")
+	upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name")
 	upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y")
 	upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit")
 	upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)")
@@ -80,7 +82,7 @@ func runUpload(cmd 
*Command, args []string) bool {
 			return e
 		}
 		results, e := operation.SubmitFiles(*upload.master, parts,
-			*upload.replication, *upload.collection,
+			*upload.replication, *upload.collection, *upload.dataCenter,
 			*upload.ttl, *upload.maxMB, secret)
 		bytes, _ := json.Marshal(results)
 		fmt.Println(string(bytes))
@@ -99,7 +101,7 @@ func runUpload(cmd *Command, args []string) bool {
 			fmt.Println(e.Error())
 		}
 		results, _ := operation.SubmitFiles(*upload.master, parts,
-			*upload.replication, *upload.collection,
+			*upload.replication, *upload.collection, *upload.dataCenter,
 			*upload.ttl, *upload.maxMB, secret)
 		bytes, _ := json.Marshal(results)
 		fmt.Println(string(bytes))
diff --git a/weed/operation/submit.go b/weed/operation/submit.go
index 75d5afbde..349cddfce 100644
--- a/weed/operation/submit.go
+++ b/weed/operation/submit.go
@@ -23,6 +23,7 @@ type FilePart struct {
 	ModTime     int64 //in seconds
 	Replication string
 	Collection  string
+	DataCenter  string
 	Ttl         string
 	Server      string //this comes from assign result
 	Fid         string //this comes from assign result, but customizable
@@ -37,7 +38,7 @@ type SubmitResult struct {
 }
 
 func SubmitFiles(master string, files []FilePart,
-	replication string, collection string, ttl string, maxMB int,
+	replication string, collection string, dataCenter string, ttl string, maxMB int,
 	secret security.Secret,
 ) ([]SubmitResult, error) {
 	results := make([]SubmitResult, len(files))
@@ -48,6 +49,7 @@ func SubmitFiles(master string, files []FilePart,
 		Count:       uint64(len(files)),
 		Replication: replication,
 		Collection:  collection,
+		DataCenter:  dataCenter,
 		Ttl:         ttl,
 	}
 	ret, err := Assign(master, ar)
@@ -65,6 +67,7 @@ func SubmitFiles(master string, files []FilePart,
 		file.Server = ret.Url
 		file.Replication = replication
 		file.Collection = collection
+		file.DataCenter = dataCenter
 		results[index].Size, err = file.Upload(maxMB, master, secret)
 		if err != nil {
 			results[index].Error = err.Error()
@@ -129,11 +132,47 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
 			Chunks: make([]*ChunkInfo, 0, chunks),
 		}
 
+		var ret *AssignResult
+		var id string
+		if fi.DataCenter != "" {
+			ar := &VolumeAssignRequest{
+				Count:       uint64(chunks),
+				Replication: fi.Replication,
+				Collection:  fi.Collection,
+				// pass the requested data center along, so the bulk assignment
+				// actually lands in it
+				DataCenter: fi.DataCenter,
+				Ttl:        fi.Ttl,
+			}
+			ret, err = Assign(master, ar)
+			if err != nil {
+				return
+			}
+		}
 		for i := int64(0); i < chunks; i++ {
-			id, count, e := upload_one_chunk(
+			if fi.DataCenter == "" {
+				ar := &VolumeAssignRequest{
+					Count:       1,
+					Replication: fi.Replication,
+					Collection:  fi.Collection,
+					Ttl:         fi.Ttl,
+				}
+				ret, err = Assign(master, ar)
+				if err != nil {
+					// delete all uploaded chunks
+					cm.DeleteChunks(master)
+					return
+				}
+				id = ret.Fid
+			} else {
+				id = ret.Fid
+				if i > 0 {
+					id += "_" + strconv.FormatInt(i, 10)
+				}
+			}
+			fileUrl := "http://" + ret.Url + "/" + id
+			count, e := upload_one_chunk(
 				baseName+"-"+strconv.FormatInt(i+1, 10),
 				io.LimitReader(fi.Reader, chunkSize),
-				master, fi.Replication, fi.Collection, fi.Ttl,
+				master, fileUrl,
 				jwt)
 			if e != nil {
 				// delete all uploaded chunks
@@ -165,26 +203,15 @@
 }
 
 func upload_one_chunk(filename string, reader io.Reader, master,
-	replication string, collection string, ttl string, jwt security.EncodedJwt,
-) (fid string, size uint32, e error) {
-	ar := &VolumeAssignRequest{
-		Count:       1,
-		Replication: replication,
-		Collection:  collection,
-		Ttl:         ttl,
-	}
-	ret, err := Assign(master, ar)
-	if err != nil {
-		return "", 0, err
-	}
-	fileUrl, fid := 
"http://"+ret.Url+"/"+ret.Fid, ret.Fid + fileUrl string, jwt security.EncodedJwt, +) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") uploadResult, uploadError := Upload(fileUrl, filename, reader, false, "application/octet-stream", nil, jwt) if uploadError != nil { - return fid, 0, uploadError + return 0, uploadError } - return fid, uploadResult.Size, nil + return uploadResult.Size, nil } func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error { From 4beaaa06505220c80d502d7b3ebd8b8b71071f5f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Jan 2017 19:31:55 -0800 Subject: [PATCH 56/61] simplify the gzip optimization logic fix https://github.com/chrislusf/seaweedfs/issues/436 --- weed/storage/needle.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/weed/storage/needle.go b/weed/storage/needle.go index 82ba2fb6a..29e70ff10 100644 --- a/weed/storage/needle.go +++ b/weed/storage/needle.go @@ -144,11 +144,13 @@ func ParseUpload(r *http.Request) ( isGzipped = true } if ext == ".gz" { - isGzipped = true - } - if strings.HasSuffix(fileName, ".gz") && - !strings.HasSuffix(fileName, ".tar.gz") { - fileName = fileName[:len(fileName)-3] + if strings.HasSuffix(fileName, ".css.gz") || + strings.HasSuffix(fileName, ".html.gz") || + strings.HasSuffix(fileName, ".txt.gz") || + strings.HasSuffix(fileName, ".js.gz") { + fileName = fileName[:len(fileName)-3] + isGzipped = true + } } } modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) From e46c3415f752e2e0c252c420adb882c4bcb7416b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Jan 2017 01:01:12 -0800 Subject: [PATCH 57/61] gRpc for master~volume heartbeat --- weed/command/master.go | 27 +- weed/command/server.go | 28 +- weed/command/volume.go | 2 +- weed/operation/system_message.pb.go | 203 ----------- weed/operation/system_message_test.go | 59 --- weed/pb/Makefile | 6 + weed/pb/seaweed.pb.go | 384 ++++++++++++++++++++ weed/pb/seaweed.proto | 41 +++ weed/server/master_grpc_server.go | 57 +++ weed/server/master_server.go | 1 - weed/server/master_server_handlers_admin.go | 36 -- weed/server/volume_grpc_client.go | 74 ++++ weed/server/volume_server.go | 34 +- weed/storage/store.go | 102 ++---- weed/storage/volume_info.go | 25 +- weed/topology/data_node.go | 3 +- weed/topology/node.go | 6 - weed/topology/rack.go | 5 - weed/topology/topology.go | 40 +- weed/topology/topology_event_handling.go | 14 - 20 files changed, 664 insertions(+), 483 deletions(-) delete mode 100644 weed/operation/system_message.pb.go delete mode 100644 weed/operation/system_message_test.go create mode 100644 weed/pb/Makefile create mode 100644 weed/pb/seaweed.pb.go create mode 100644 weed/pb/seaweed.proto create mode 100644 weed/server/master_grpc_server.go create mode 100644 weed/server/volume_grpc_client.go diff --git a/weed/command/master.go b/weed/command/master.go index ec54fbd7b..eee22810b 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -10,9 +10,13 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" + "github.com/soheilhy/cmux" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" ) func init() { @@ -39,7 +43,7 @@ var ( mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") confFile = cmdMaster.Flag.String("conf", 
"/etc/weedfs/weedfs.conf", "Deprecating! xml configuration file") defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") - mTimeout = cmdMaster.Flag.Int("idleTimeout", 10, "connection idle seconds") + mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds") mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") garbageThreshold = cmdMaster.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces") masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") @@ -99,8 +103,25 @@ func runMaster(cmd *Command, args []string) bool { ms.SetRaftServer(raftServer) }() - if e := http.Serve(listener, r); e != nil { - glog.Fatalf("Fail to serve: %v", e) + // start grpc and http server + m := cmux.New(listener) + + grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) + httpL := m.Match(cmux.Any()) + + // Create your protocol servers. + grpcS := grpc.NewServer() + pb.RegisterSeaweedServer(grpcS, ms) + reflection.Register(grpcS) + + httpS := &http.Server{Handler: r} + + go grpcS.Serve(grpcL) + go httpS.Serve(httpL) + + if err := m.Serve(); err != nil { + glog.Fatalf("master server failed to serve: %v", err) } + return true } diff --git a/weed/command/server.go b/weed/command/server.go index 87146940f..5bde22517 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -11,10 +11,14 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" "github.com/gorilla/mux" + "github.com/soheilhy/cmux" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" ) type ServerOptions struct { @@ -51,7 +55,7 @@ var ( serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") serverMaxCpu = cmdServer.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - serverTimeout = cmdServer.Flag.Int("idleTimeout", 10, "connection idle seconds") + serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") @@ -230,9 +234,27 @@ func runServer(cmd *Command, args []string) bool { }() raftWaitForMaster.Done() - if e := http.Serve(masterListener, r); e != nil { - glog.Fatalf("Master Fail to serve:%s", e.Error()) + + // start grpc and http server + m := cmux.New(masterListener) + + grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) + httpL := m.Match(cmux.Any()) + + // Create your protocol servers. 
+ grpcS := grpc.NewServer() + pb.RegisterSeaweedServer(grpcS, ms) + reflection.Register(grpcS) + + httpS := &http.Server{Handler: r} + + go grpcS.Serve(grpcL) + go httpS.Serve(httpL) + + if err := m.Serve(); err != nil { + glog.Fatalf("master server failed to serve: %v", err) } + }() volumeWait.Wait() diff --git a/weed/command/volume.go b/weed/command/volume.go index ba498b8e4..0e69325b6 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -48,7 +48,7 @@ func init() { v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") v.master = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location") v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") - v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 10, "connection idle seconds") + v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") diff --git a/weed/operation/system_message.pb.go b/weed/operation/system_message.pb.go deleted file mode 100644 index 742a1ca4e..000000000 --- a/weed/operation/system_message.pb.go +++ /dev/null @@ -1,203 +0,0 @@ -// Code generated by protoc-gen-go. -// source: system_message.proto -// DO NOT EDIT! - -/* -Package operation is a generated protocol buffer package. - -It is generated from these files: - system_message.proto - -It has these top-level messages: - VolumeInformationMessage - JoinMessage -*/ -package operation - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = math.Inf - -type VolumeInformationMessage struct { - Id *uint32 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - Size *uint64 `protobuf:"varint,2,req,name=size" json:"size,omitempty"` - Collection *string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - FileCount *uint64 `protobuf:"varint,4,req,name=file_count" json:"file_count,omitempty"` - DeleteCount *uint64 `protobuf:"varint,5,req,name=delete_count" json:"delete_count,omitempty"` - DeletedByteCount *uint64 `protobuf:"varint,6,req,name=deleted_byte_count" json:"deleted_byte_count,omitempty"` - ReadOnly *bool `protobuf:"varint,7,opt,name=read_only" json:"read_only,omitempty"` - ReplicaPlacement *uint32 `protobuf:"varint,8,req,name=replica_placement" json:"replica_placement,omitempty"` - Version *uint32 `protobuf:"varint,9,opt,name=version,def=2" json:"version,omitempty"` - Ttl *uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } -func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeInformationMessage) ProtoMessage() {} - -const Default_VolumeInformationMessage_Version uint32 = 2 - -func (m *VolumeInformationMessage) GetId() uint32 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *VolumeInformationMessage) GetSize() uint64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *VolumeInformationMessage) GetCollection() string { - if m != nil && m.Collection != nil { - return *m.Collection - } - return "" -} - -func (m *VolumeInformationMessage) GetFileCount() uint64 { - if m != nil && m.FileCount != nil { - return *m.FileCount - } - return 0 -} - -func (m *VolumeInformationMessage) GetDeleteCount() uint64 { - if m != nil && m.DeleteCount != nil { - return *m.DeleteCount - } - return 0 -} - -func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 { - if m != nil && m.DeletedByteCount != nil { - return *m.DeletedByteCount - } - return 0 -} - -func (m *VolumeInformationMessage) GetReadOnly() bool { - if m != nil && m.ReadOnly != nil { - return *m.ReadOnly - } - return false -} - -func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 { - if m != nil && m.ReplicaPlacement != nil { - return *m.ReplicaPlacement - } - return 0 -} - -func (m *VolumeInformationMessage) GetVersion() uint32 { - if m != nil && m.Version != nil { - return *m.Version - } - return Default_VolumeInformationMessage_Version -} - -func (m *VolumeInformationMessage) GetTtl() uint32 { - if m != nil && m.Ttl != nil { - return *m.Ttl - } - return 0 -} - -type JoinMessage struct { - IsInit *bool `protobuf:"varint,1,opt,name=is_init" json:"is_init,omitempty"` - Ip *string `protobuf:"bytes,2,req,name=ip" json:"ip,omitempty"` - Port *uint32 `protobuf:"varint,3,req,name=port" json:"port,omitempty"` - PublicUrl *string `protobuf:"bytes,4,opt,name=public_url" json:"public_url,omitempty"` - MaxVolumeCount *uint32 `protobuf:"varint,5,req,name=max_volume_count" json:"max_volume_count,omitempty"` - MaxFileKey *uint64 `protobuf:"varint,6,req,name=max_file_key" json:"max_file_key,omitempty"` - DataCenter *string `protobuf:"bytes,7,opt,name=data_center" json:"data_center,omitempty"` - Rack *string `protobuf:"bytes,8,opt,name=rack" json:"rack,omitempty"` - Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` - AdminPort *uint32 
`protobuf:"varint,10,opt,name=admin_port" json:"admin_port,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *JoinMessage) Reset() { *m = JoinMessage{} } -func (m *JoinMessage) String() string { return proto.CompactTextString(m) } -func (*JoinMessage) ProtoMessage() {} - -func (m *JoinMessage) GetIsInit() bool { - if m != nil && m.IsInit != nil { - return *m.IsInit - } - return false -} - -func (m *JoinMessage) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *JoinMessage) GetPort() uint32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *JoinMessage) GetPublicUrl() string { - if m != nil && m.PublicUrl != nil { - return *m.PublicUrl - } - return "" -} - -func (m *JoinMessage) GetMaxVolumeCount() uint32 { - if m != nil && m.MaxVolumeCount != nil { - return *m.MaxVolumeCount - } - return 0 -} - -func (m *JoinMessage) GetMaxFileKey() uint64 { - if m != nil && m.MaxFileKey != nil { - return *m.MaxFileKey - } - return 0 -} - -func (m *JoinMessage) GetDataCenter() string { - if m != nil && m.DataCenter != nil { - return *m.DataCenter - } - return "" -} - -func (m *JoinMessage) GetRack() string { - if m != nil && m.Rack != nil { - return *m.Rack - } - return "" -} - -func (m *JoinMessage) GetVolumes() []*VolumeInformationMessage { - if m != nil { - return m.Volumes - } - return nil -} - -func (m *JoinMessage) GetAdminPort() uint32 { - if m != nil && m.AdminPort != nil { - return *m.AdminPort - } - return 0 -} - -func init() { -} diff --git a/weed/operation/system_message_test.go b/weed/operation/system_message_test.go deleted file mode 100644 index d18ca49a4..000000000 --- a/weed/operation/system_message_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package operation - -import ( - "encoding/json" - "log" - "testing" - - "github.com/golang/protobuf/proto" -) - -func TestSerialDeserial(t *testing.T) { - volumeMessage := &VolumeInformationMessage{ - Id: proto.Uint32(12), - Size: proto.Uint64(2341234), - Collection: proto.String("benchmark"), - FileCount: proto.Uint64(2341234), - DeleteCount: proto.Uint64(234), - DeletedByteCount: proto.Uint64(21234), - ReadOnly: proto.Bool(false), - ReplicaPlacement: proto.Uint32(210), - Version: proto.Uint32(2), - } - var volumeMessages []*VolumeInformationMessage - volumeMessages = append(volumeMessages, volumeMessage) - - joinMessage := &JoinMessage{ - IsInit: proto.Bool(true), - Ip: proto.String("127.0.3.12"), - Port: proto.Uint32(34546), - PublicUrl: proto.String("localhost:2342"), - MaxVolumeCount: proto.Uint32(210), - MaxFileKey: proto.Uint64(324234423), - DataCenter: proto.String("dc1"), - Rack: proto.String("rack2"), - Volumes: volumeMessages, - } - - data, err := proto.Marshal(joinMessage) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newMessage := &JoinMessage{} - err = proto.Unmarshal(data, newMessage) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - log.Println("The pb data size is", len(data)) - - jsonData, jsonError := json.Marshal(joinMessage) - if jsonError != nil { - log.Fatal("json marshaling error: ", jsonError) - } - log.Println("The json data size is", len(jsonData), string(jsonData)) - - // Now test and newTest contain the same data. 
- if *joinMessage.PublicUrl != *newMessage.PublicUrl { - log.Fatalf("data mismatch %q != %q", *joinMessage.PublicUrl, *newMessage.PublicUrl) - } -} diff --git a/weed/pb/Makefile b/weed/pb/Makefile new file mode 100644 index 000000000..8d0eb7854 --- /dev/null +++ b/weed/pb/Makefile @@ -0,0 +1,6 @@ +all: gen + +.PHONY : gen + +gen: + protoc seaweed.proto --go_out=plugins=grpc:. diff --git a/weed/pb/seaweed.pb.go b/weed/pb/seaweed.pb.go new file mode 100644 index 000000000..02de2d8a6 --- /dev/null +++ b/weed/pb/seaweed.pb.go @@ -0,0 +1,384 @@ +// Code generated by protoc-gen-go. +// source: seaweed.proto +// DO NOT EDIT! + +/* +Package pb is a generated protocol buffer package. + +It is generated from these files: + seaweed.proto + +It has these top-level messages: + Heartbeat + HeartbeatResponse + VolumeInformationMessage +*/ +package pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Heartbeat struct { + IsInit bool `protobuf:"varint,1,opt,name=is_init,json=isInit" json:"is_init,omitempty"` + Ip string `protobuf:"bytes,2,opt,name=ip" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,3,opt,name=port" json:"port,omitempty"` + PublicUrl string `protobuf:"bytes,4,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` + MaxVolumeCount uint32 `protobuf:"varint,5,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + MaxFileKey uint64 `protobuf:"varint,6,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` + DataCenter string `protobuf:"bytes,7,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,8,opt,name=rack" json:"rack,omitempty"` + Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` + AdminPort uint32 `protobuf:"varint,10,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` +} + +func (m *Heartbeat) Reset() { *m = Heartbeat{} } +func (m *Heartbeat) String() string { return proto.CompactTextString(m) } +func (*Heartbeat) ProtoMessage() {} +func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Heartbeat) GetIsInit() bool { + if m != nil { + return m.IsInit + } + return false +} + +func (m *Heartbeat) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *Heartbeat) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Heartbeat) GetPublicUrl() string { + if m != nil { + return m.PublicUrl + } + return "" +} + +func (m *Heartbeat) GetMaxVolumeCount() uint32 { + if m != nil { + return m.MaxVolumeCount + } + return 0 +} + +func (m *Heartbeat) GetMaxFileKey() uint64 { + if m != nil { + return m.MaxFileKey + } + return 0 +} + +func (m *Heartbeat) GetDataCenter() string { + if m != nil { + return m.DataCenter + } + return "" +} + +func (m *Heartbeat) GetRack() string { + if m != nil { + return m.Rack + } + return "" +} + +func (m *Heartbeat) GetVolumes() 
[]*VolumeInformationMessage { + if m != nil { + return m.Volumes + } + return nil +} + +func (m *Heartbeat) GetAdminPort() uint32 { + if m != nil { + return m.AdminPort + } + return 0 +} + +type HeartbeatResponse struct { + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volumeSizeLimit" json:"volumeSizeLimit,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secretKey" json:"secretKey,omitempty"` +} + +func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } +func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) } +func (*HeartbeatResponse) ProtoMessage() {} +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { + if m != nil { + return m.VolumeSizeLimit + } + return 0 +} + +func (m *HeartbeatResponse) GetSecretKey() string { + if m != nil { + return m.SecretKey + } + return "" +} + +type VolumeInformationMessage struct { + Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` +} + +func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } +func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeInformationMessage) ProtoMessage() {} +func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *VolumeInformationMessage) GetId() uint32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *VolumeInformationMessage) GetSize() uint64 { + if m != nil { + return m.Size + } + return 0 +} + +func (m *VolumeInformationMessage) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *VolumeInformationMessage) GetFileCount() uint64 { + if m != nil { + return m.FileCount + } + return 0 +} + +func (m *VolumeInformationMessage) GetDeleteCount() uint64 { + if m != nil { + return m.DeleteCount + } + return 0 +} + +func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 { + if m != nil { + return m.DeletedByteCount + } + return 0 +} + +func (m *VolumeInformationMessage) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 { + if m != nil { + return m.ReplicaPlacement + } + return 0 +} + +func (m *VolumeInformationMessage) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *VolumeInformationMessage) GetTtl() uint32 { + if m != nil { + return m.Ttl + } + return 0 +} + +func init() { + proto.RegisterType((*Heartbeat)(nil), "pb.Heartbeat") + proto.RegisterType((*HeartbeatResponse)(nil), 
"pb.HeartbeatResponse") + proto.RegisterType((*VolumeInformationMessage)(nil), "pb.VolumeInformationMessage") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Seaweed service + +type SeaweedClient interface { + SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) +} + +type seaweedClient struct { + cc *grpc.ClientConn +} + +func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient { + return &seaweedClient{cc} +} + +func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/pb.Seaweed/SendHeartbeat", opts...) + if err != nil { + return nil, err + } + x := &seaweedSendHeartbeatClient{stream} + return x, nil +} + +type Seaweed_SendHeartbeatClient interface { + Send(*Heartbeat) error + Recv() (*HeartbeatResponse, error) + grpc.ClientStream +} + +type seaweedSendHeartbeatClient struct { + grpc.ClientStream +} + +func (x *seaweedSendHeartbeatClient) Send(m *Heartbeat) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) { + m := new(HeartbeatResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Seaweed service + +type SeaweedServer interface { + SendHeartbeat(Seaweed_SendHeartbeatServer) error +} + +func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { + s.RegisterService(&_Seaweed_serviceDesc, srv) +} + +func _Seaweed_SendHeartbeat_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedServer).SendHeartbeat(&seaweedSendHeartbeatServer{stream}) +} + +type Seaweed_SendHeartbeatServer interface { + Send(*HeartbeatResponse) error + Recv() (*Heartbeat, error) + grpc.ServerStream +} + +type seaweedSendHeartbeatServer struct { + grpc.ServerStream +} + +func (x *seaweedSendHeartbeatServer) Send(m *HeartbeatResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedSendHeartbeatServer) Recv() (*Heartbeat, error) { + m := new(Heartbeat) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Seaweed_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Seaweed", + HandlerType: (*SeaweedServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SendHeartbeat", + Handler: _Seaweed_SendHeartbeat_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "seaweed.proto", +} + +func init() { proto.RegisterFile("seaweed.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x86, 0x3f, 0x3b, 0xfe, 0x92, 0x78, 0x52, 0x97, 0x74, 0x25, 0x84, 0x05, 0x05, 0x4c, 0x4e, + 0x96, 0x40, 0x11, 0x2a, 0x12, 0x17, 0x6e, 0x54, 0xaa, 0xa8, 0x0a, 0xa2, 0xda, 0x08, 0x2e, 0x1c, + 0xac, 0xb5, 0x3d, 0x45, 0xab, 0xae, 0xd7, 0xd6, 0x7a, 0x53, 0xe2, 0xfe, 0x39, 0x2e, 0xfc, 0x30, + 0xb4, 0xb3, 0x49, 0x5a, 0x90, 0xb8, 0xcd, 0x3c, 0xfb, 0x8e, 0x77, 0x67, 0xde, 0x31, 0x24, 0x3d, + 0x8a, 0x1f, 0x88, 0xf5, 0xb2, 0x33, 0xad, 0x6d, 0x59, 
0xd8, 0x95, 0x8b, 0x9f, 0x21, 0xc4, 0x1f, + 0x50, 0x18, 0x5b, 0xa2, 0xb0, 0xec, 0x11, 0x4c, 0x64, 0x5f, 0x48, 0x2d, 0x6d, 0x1a, 0x64, 0x41, + 0x3e, 0xe5, 0x63, 0xd9, 0x9f, 0x6b, 0x69, 0xd9, 0x21, 0x84, 0xb2, 0x4b, 0xc3, 0x2c, 0xc8, 0x63, + 0x1e, 0xca, 0x8e, 0x31, 0x88, 0xba, 0xd6, 0xd8, 0x74, 0x94, 0x05, 0x79, 0xc2, 0x29, 0x66, 0x4f, + 0x01, 0xba, 0x75, 0xa9, 0x64, 0x55, 0xac, 0x8d, 0x4a, 0x23, 0xd2, 0xc6, 0x9e, 0x7c, 0x31, 0x8a, + 0xe5, 0x30, 0x6f, 0xc4, 0xa6, 0xb8, 0x69, 0xd5, 0xba, 0xc1, 0xa2, 0x6a, 0xd7, 0xda, 0xa6, 0xff, + 0x53, 0xf9, 0x61, 0x23, 0x36, 0x5f, 0x09, 0x9f, 0x3a, 0xca, 0x32, 0x38, 0x70, 0xca, 0x2b, 0xa9, + 0xb0, 0xb8, 0xc6, 0x21, 0x1d, 0x67, 0x41, 0x1e, 0x71, 0x68, 0xc4, 0xe6, 0x4c, 0x2a, 0xbc, 0xc0, + 0x81, 0x3d, 0x87, 0x59, 0x2d, 0xac, 0x28, 0x2a, 0xd4, 0x16, 0x4d, 0x3a, 0xa1, 0xbb, 0xc0, 0xa1, + 0x53, 0x22, 0xee, 0x7d, 0x46, 0x54, 0xd7, 0xe9, 0x94, 0x4e, 0x28, 0x66, 0x6f, 0x61, 0xe2, 0x2f, + 0xef, 0xd3, 0x38, 0x1b, 0xe5, 0xb3, 0x93, 0xe3, 0x65, 0x57, 0x2e, 0xfd, 0xc5, 0xe7, 0xfa, 0xaa, + 0x35, 0x8d, 0xb0, 0xb2, 0xd5, 0x9f, 0xb0, 0xef, 0xc5, 0x77, 0xe4, 0x3b, 0xb1, 0xeb, 0x4b, 0xd4, + 0x8d, 0xd4, 0x05, 0x75, 0x0c, 0xf4, 0xe4, 0x98, 0xc8, 0x65, 0x6b, 0xec, 0xe2, 0x1b, 0x1c, 0xed, + 0x07, 0xc8, 0xb1, 0xef, 0x5a, 0xdd, 0x23, 0xcb, 0xe1, 0x81, 0x2f, 0x5f, 0xc9, 0x5b, 0xfc, 0x28, + 0x9b, 0xed, 0x40, 0x23, 0xfe, 0x37, 0x66, 0xc7, 0x10, 0xf7, 0x58, 0x19, 0xb4, 0x17, 0x38, 0x6c, + 0x07, 0x7c, 0x07, 0x16, 0xbf, 0x42, 0x48, 0xff, 0xf5, 0x42, 0x32, 0xa5, 0xa6, 0xef, 0x26, 0x3c, + 0x94, 0xb5, 0x6b, 0xba, 0x97, 0xb7, 0x48, 0x5f, 0x89, 0x38, 0xc5, 0xec, 0x19, 0x40, 0xd5, 0x2a, + 0x85, 0x95, 0x2b, 0x24, 0xbb, 0x62, 0x7e, 0x8f, 0xb8, 0xe6, 0x68, 0xce, 0xde, 0x8f, 0x88, 0x2a, + 0x63, 0x47, 0xbc, 0x15, 0x2f, 0xe0, 0xa0, 0x46, 0x85, 0xf6, 0xbe, 0x61, 0x11, 0x9f, 0x79, 0xe6, + 0x25, 0xaf, 0x80, 0xf9, 0xb4, 0x2e, 0xca, 0x61, 0x2f, 0xf4, 0x9e, 0xcd, 0xb7, 0x27, 0xef, 0x87, + 0x9d, 0xfa, 0x09, 0xc4, 0x06, 0x45, 0x5d, 0xb4, 0x5a, 0x0d, 0xe4, 0xdb, 0x94, 0x4f, 0x1d, 0xf8, + 0xac, 0xd5, 0xc0, 0x5e, 0xc2, 0x91, 0xc1, 0x4e, 0xc9, 0x4a, 0x14, 0x9d, 0x12, 0x15, 0x36, 0xa8, + 0x2d, 0x59, 0x98, 0xf0, 0xf9, 0xf6, 0xe0, 0x72, 0xc7, 0x59, 0x0a, 0x93, 0x1b, 0x34, 0xbd, 0x6b, + 0x2b, 0x26, 0xc9, 0x2e, 0x65, 0x73, 0x18, 0x59, 0xab, 0xb6, 0x4e, 0xb9, 0xf0, 0xe4, 0x0c, 0x26, + 0x2b, 0xbf, 0xfa, 0xec, 0x1d, 0x24, 0x2b, 0xd4, 0xf5, 0xdd, 0xce, 0x27, 0x6e, 0x0b, 0xf6, 0xe9, + 0xe3, 0x87, 0x7f, 0xa4, 0x3b, 0x43, 0x17, 0xff, 0xe5, 0xc1, 0xeb, 0xa0, 0x1c, 0xd3, 0x8f, 0xf3, + 0xe6, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0xc1, 0xd3, 0x35, 0x49, 0x03, 0x00, 0x00, +} diff --git a/weed/pb/seaweed.proto b/weed/pb/seaweed.proto new file mode 100644 index 000000000..2dc8343a2 --- /dev/null +++ b/weed/pb/seaweed.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package pb; + +////////////////////////////////////////////////// + +service Seaweed { + rpc SendHeartbeat(stream Heartbeat) returns (stream HeartbeatResponse) {} +} + +////////////////////////////////////////////////// + +message Heartbeat { + bool is_init = 1; + string ip = 2; + uint32 port = 3; + string public_url = 4; + uint32 max_volume_count = 5; + uint64 max_file_key = 6; + string data_center = 7; + string rack = 8; + repeated VolumeInformationMessage volumes = 9; + uint32 admin_port = 10; +} +message HeartbeatResponse { + uint64 volumeSizeLimit = 1; + string secretKey = 2; +} + +message VolumeInformationMessage { + uint32 id = 1; + uint64 size = 2; + string collection = 3; + uint64 file_count = 4; + uint64 delete_count = 5; + uint64 deleted_byte_count = 6; + 
bool read_only = 7; + uint32 replica_placement = 8; + uint32 version = 9; + uint32 ttl = 10; +} diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go new file mode 100644 index 000000000..29c95a3d4 --- /dev/null +++ b/weed/server/master_grpc_server.go @@ -0,0 +1,57 @@ +package weed_server + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/topology" +) + +func (ms MasterServer) SendHeartbeat(stream pb.Seaweed_SendHeartbeatServer) error { + var dn *topology.DataNode + t := ms.Topo + for { + heartbeat, err := stream.Recv() + if err == nil { + if dn == nil { + t.Sequence.SetMax(heartbeat.MaxFileKey) + dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) + dc := t.GetOrCreateDataCenter(dcName) + rack := dc.GetOrCreateRack(rackName) + dn = rack.GetOrCreateDataNode(heartbeat.Ip, + int(heartbeat.Port), heartbeat.PublicUrl, + int(heartbeat.MaxVolumeCount)) + glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) + if err := stream.Send(&pb.HeartbeatResponse{ + VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, + SecretKey: string(ms.guard.SecretKey), + }); err != nil { + return err + } + } + + var volumeInfos []storage.VolumeInfo + for _, v := range heartbeat.Volumes { + if vi, err := storage.NewVolumeInfo(v); err == nil { + volumeInfos = append(volumeInfos, vi) + } else { + glog.V(0).Infof("Fail to convert joined volume information: %v", err) + } + } + deletedVolumes := dn.UpdateVolumes(volumeInfos) + for _, v := range volumeInfos { + t.RegisterVolumeLayout(v, dn) + } + for _, v := range deletedVolumes { + t.UnRegisterVolumeLayout(v, dn) + } + + } else { + glog.V(0).Infof("lost volume server %s:%d", dn.Ip, dn.Port) + if dn != nil { + t.UnRegisterDataNode(dn) + } + return err + } + } +} diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 9f59c2400..f02cb2790 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -72,7 +72,6 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string, r.HandleFunc("/ui/index.html", ms.uiStatusHandler) r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) - r.HandleFunc("/dir/join", ms.proxyToLeader(ms.guard.WhiteList(ms.dirJoinHandler))) r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) r.HandleFunc("/vol/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeLookupHandler))) diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index efe81bf89..b15125576 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -1,21 +1,16 @@ package weed_server import ( - "encoding/json" "errors" "fmt" - "io/ioutil" "math/rand" "net/http" "strconv" - "strings" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) { @@ -34,37 +29,6 @@ func 
(ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R ms.Topo.DeleteCollection(r.FormValue("collection")) } -func (ms *MasterServer) dirJoinHandler(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) - if err != nil { - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - joinMessage := &operation.JoinMessage{} - if err = proto.Unmarshal(body, joinMessage); err != nil { - writeJsonError(w, r, http.StatusBadRequest, err) - return - } - if *joinMessage.Ip == "" { - *joinMessage.Ip = r.RemoteAddr[0:strings.LastIndex(r.RemoteAddr, ":")] - } - if glog.V(4) { - if jsonData, jsonError := json.Marshal(joinMessage); jsonError != nil { - glog.V(0).Infoln("json marshaling error: ", jsonError) - writeJsonError(w, r, http.StatusBadRequest, jsonError) - return - } else { - glog.V(4).Infoln("Proto size", len(body), "json size", len(jsonData), string(jsonData)) - } - } - - ms.Topo.ProcessJoinMessage(joinMessage) - writeJsonQuiet(w, r, http.StatusOK, operation.JoinResult{ - VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, - SecretKey: string(ms.guard.SecretKey), - }) -} - func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) m["Version"] = util.VERSION diff --git a/weed/server/volume_grpc_client.go b/weed/server/volume_grpc_client.go new file mode 100644 index 000000000..54e2c2f75 --- /dev/null +++ b/weed/server/volume_grpc_client.go @@ -0,0 +1,74 @@ +package weed_server + +import ( + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func (vs *VolumeServer) heartbeat() { + + glog.V(0).Infof("Volume server bootstraps with master %s", vs.GetMasterNode()) + vs.masterNodes = storage.NewMasterNodes(vs.masterNode) + vs.store.SetDataCenter(vs.dataCenter) + vs.store.SetRack(vs.rack) + + for { + err := vs.doHeartbeat(time.Duration(vs.pulseSeconds) * time.Second) + if err != nil { + glog.V(0).Infof("heartbeat error: %v", err) + time.Sleep(time.Duration(3*vs.pulseSeconds) * time.Second) + } + } +} + +func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error { + + masterNode, err := vs.masterNodes.FindMaster() + if err != nil { + return fmt.Errorf("No master found: %v", err) + } + + grpcConection, err := grpc.Dial(masterNode, grpc.WithInsecure()) + if err != nil { + return fmt.Errorf("fail to dial: %v", err) + } + defer grpcConection.Close() + + client := pb.NewSeaweedClient(grpcConection) + stream, err := client.SendHeartbeat(context.Background()) + if err != nil { + glog.V(0).Infof("%v.SendHeartbeat(_) = _, %v", client, err) + return err + } + vs.SetMasterNode(masterNode) + glog.V(0).Infof("Heartbeat to %s", masterNode) + + vs.store.Client = stream + defer func() { vs.store.Client = nil }() + + go func() { + for { + in, err := stream.Recv() + if err != nil { + return + } + vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() + vs.guard.SecretKey = security.Secret(in.GetSecretKey()) + } + }() + + for { + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return err + } + time.Sleep(sleepInterval) + } +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 1a912a169..e86c33bda 100644 --- a/weed/server/volume_server.go +++ 
b/weed/server/volume_server.go @@ -1,10 +1,8 @@ package weed_server import ( - "math/rand" "net/http" "sync" - "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -19,6 +17,7 @@ type VolumeServer struct { rack string store *storage.Store guard *security.Guard + masterNodes *storage.MasterNodes needleMapKind storage.NeedleMapType FixJpgOrientation bool @@ -70,36 +69,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, publicMux.HandleFunc("/", vs.publicReadOnlyHandler) } - go func() { - connected := true - - glog.V(0).Infof("Volume server bootstraps with master %s", vs.GetMasterNode()) - vs.store.SetBootstrapMaster(vs.GetMasterNode()) - vs.store.SetDataCenter(vs.dataCenter) - vs.store.SetRack(vs.rack) - for { - glog.V(4).Infof("Volume server sending to master %s", vs.GetMasterNode()) - master, secretKey, err := vs.store.SendHeartbeatToMaster() - if err == nil { - if !connected { - connected = true - vs.SetMasterNode(master) - vs.guard.SecretKey = secretKey - glog.V(0).Infoln("Volume Server Connected with master at", master) - } - } else { - glog.V(1).Infof("Volume Server Failed to talk with master %s: %v", vs.masterNode, err) - if connected { - connected = false - } - } - if connected { - time.Sleep(time.Duration(float32(vs.pulseSeconds*1e3)*(1+rand.Float32())) * time.Millisecond) - } else { - time.Sleep(time.Duration(float32(vs.pulseSeconds*1e3)*0.25) * time.Millisecond) - } - } - }() + go vs.heartbeat() return vs } diff --git a/weed/storage/store.go b/weed/storage/store.go index be2044d64..c62ac9ab7 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -1,7 +1,6 @@ package storage import ( - "encoding/json" "errors" "fmt" "math/rand" @@ -10,9 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb" ) const ( @@ -76,12 +73,12 @@ type Store struct { dataCenter string //optional informaton, overwriting master setting if exists rack string //optional information, overwriting master setting if exists connected bool - volumeSizeLimit uint64 //read from the master - masterNodes *MasterNodes + VolumeSizeLimit uint64 //read from the master + Client pb.Seaweed_SendHeartbeatClient } func (s *Store) String() (str string) { - str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d, masterNodes:%s", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.volumeSizeLimit, s.masterNodes) + str = fmt.Sprintf("Ip:%s, Port:%d, PublicUrl:%s, dataCenter:%s, rack:%s, connected:%v, volumeSizeLimit:%d", s.Ip, s.Port, s.PublicUrl, s.dataCenter, s.rack, s.connected, s.VolumeSizeLimit) return } @@ -208,15 +205,8 @@ func (s *Store) SetRack(rack string) { s.rack = rack } -func (s *Store) SetBootstrapMaster(bootstrapMaster string) { - s.masterNodes = NewMasterNodes(bootstrapMaster) -} -func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.Secret, e error) { - masterNode, e = s.masterNodes.FindMaster() - if e != nil { - return - } - var volumeMessages []*operation.VolumeInformationMessage +func (s *Store) CollectHeartbeat() *pb.Heartbeat { + var volumeMessages []*pb.VolumeInformationMessage maxVolumeCount := 0 var maxFileKey uint64 for _, location := range s.Locations { @@ -226,18 +216,18 @@ func (s *Store) SendHeartbeatToMaster() 
(masterNode string, secretKey security.S if maxFileKey < v.nm.MaxFileKey() { maxFileKey = v.nm.MaxFileKey() } - if !v.expired(s.volumeSizeLimit) { - volumeMessage := &operation.VolumeInformationMessage{ - Id: proto.Uint32(uint32(k)), - Size: proto.Uint64(uint64(v.Size())), - Collection: proto.String(v.Collection), - FileCount: proto.Uint64(uint64(v.nm.FileCount())), - DeleteCount: proto.Uint64(uint64(v.nm.DeletedCount())), - DeletedByteCount: proto.Uint64(v.nm.DeletedSize()), - ReadOnly: proto.Bool(v.readOnly), - ReplicaPlacement: proto.Uint32(uint32(v.ReplicaPlacement.Byte())), - Version: proto.Uint32(uint32(v.Version())), - Ttl: proto.Uint32(v.Ttl.ToUint32()), + if !v.expired(s.VolumeSizeLimit) { + volumeMessage := &pb.VolumeInformationMessage{ + Id: uint32(k), + Size: uint64(v.Size()), + Collection: v.Collection, + FileCount: uint64(v.nm.FileCount()), + DeleteCount: uint64(v.nm.DeletedCount()), + DeletedByteCount: v.nm.DeletedSize(), + ReadOnly: v.readOnly, + ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), + Version: uint32(v.Version()), + Ttl: v.Ttl.ToUint32(), } volumeMessages = append(volumeMessages, volumeMessage) } else { @@ -252,45 +242,17 @@ func (s *Store) SendHeartbeatToMaster() (masterNode string, secretKey security.S location.Unlock() } - joinMessage := &operation.JoinMessage{ - IsInit: proto.Bool(!s.connected), - Ip: proto.String(s.Ip), - Port: proto.Uint32(uint32(s.Port)), - PublicUrl: proto.String(s.PublicUrl), - MaxVolumeCount: proto.Uint32(uint32(maxVolumeCount)), - MaxFileKey: proto.Uint64(maxFileKey), - DataCenter: proto.String(s.dataCenter), - Rack: proto.String(s.rack), + return &pb.Heartbeat{ + Ip: s.Ip, + Port: uint32(s.Port), + PublicUrl: s.PublicUrl, + MaxVolumeCount: uint32(maxVolumeCount), + MaxFileKey: maxFileKey, + DataCenter: s.dataCenter, + Rack: s.rack, Volumes: volumeMessages, } - data, err := proto.Marshal(joinMessage) - if err != nil { - return "", "", err - } - - joinUrl := "http://" + masterNode + "/dir/join" - glog.V(4).Infof("Connecting to %s ...", joinUrl) - - jsonBlob, err := util.PostBytes(joinUrl, data) - if err != nil { - s.masterNodes.Reset() - return "", "", err - } - var ret operation.JoinResult - if err := json.Unmarshal(jsonBlob, &ret); err != nil { - glog.V(0).Infof("Failed to join %s with response: %s", joinUrl, string(jsonBlob)) - s.masterNodes.Reset() - return masterNode, "", err - } - if ret.Error != "" { - s.masterNodes.Reset() - return masterNode, "", errors.New(ret.Error) - } - s.volumeSizeLimit = ret.VolumeSizeLimit - secretKey = security.Secret(ret.SecretKey) - s.connected = true - return } func (s *Store) Close() { for _, location := range s.Locations { @@ -307,12 +269,14 @@ func (s *Store) Write(i VolumeId, n *Needle) (size uint32, err error) { if MaxPossibleVolumeSize >= v.ContentSize()+uint64(size) { size, err = v.writeNeedle(n) } else { - err = fmt.Errorf("Volume Size Limit %d Exceeded! Current size is %d", s.volumeSizeLimit, v.ContentSize()) + err = fmt.Errorf("Volume Size Limit %d Exceeded! 
Current size is %d", s.VolumeSizeLimit, v.ContentSize()) } - if s.volumeSizeLimit < v.ContentSize()+3*uint64(size) { - glog.V(0).Infoln("volume", i, "size", v.ContentSize(), "will exceed limit", s.volumeSizeLimit) - if _, _, e := s.SendHeartbeatToMaster(); e != nil { - glog.V(0).Infoln("error when reporting size:", e) + if s.VolumeSizeLimit < v.ContentSize()+3*uint64(size) { + glog.V(0).Infoln("volume", i, "size", v.ContentSize(), "will exceed limit", s.VolumeSizeLimit) + if s.Client != nil { + if e := s.Client.Send(s.CollectHeartbeat()); e != nil { + glog.V(0).Infoln("error when reporting size:", e) + } } } return diff --git a/weed/storage/volume_info.go b/weed/storage/volume_info.go index b3068eec3..c73c27fe4 100644 --- a/weed/storage/volume_info.go +++ b/weed/storage/volume_info.go @@ -2,8 +2,9 @@ package storage import ( "fmt" - "github.com/chrislusf/seaweedfs/weed/operation" "sort" + + "github.com/chrislusf/seaweedfs/weed/pb" ) type VolumeInfo struct { @@ -19,23 +20,23 @@ type VolumeInfo struct { ReadOnly bool } -func NewVolumeInfo(m *operation.VolumeInformationMessage) (vi VolumeInfo, err error) { +func NewVolumeInfo(m *pb.VolumeInformationMessage) (vi VolumeInfo, err error) { vi = VolumeInfo{ - Id: VolumeId(*m.Id), - Size: *m.Size, - Collection: *m.Collection, - FileCount: int(*m.FileCount), - DeleteCount: int(*m.DeleteCount), - DeletedByteCount: *m.DeletedByteCount, - ReadOnly: *m.ReadOnly, - Version: Version(*m.Version), + Id: VolumeId(m.Id), + Size: m.Size, + Collection: m.Collection, + FileCount: int(m.FileCount), + DeleteCount: int(m.DeleteCount), + DeletedByteCount: m.DeletedByteCount, + ReadOnly: m.ReadOnly, + Version: Version(m.Version), } - rp, e := NewReplicaPlacementFromByte(byte(*m.ReplicaPlacement)) + rp, e := NewReplicaPlacementFromByte(byte(m.ReplicaPlacement)) if e != nil { return vi, e } vi.ReplicaPlacement = rp - vi.Ttl = LoadTTLFromUint32(*m.Ttl) + vi.Ttl = LoadTTLFromUint32(m.Ttl) return vi, nil } diff --git a/weed/topology/data_node.go b/weed/topology/data_node.go index b7f039559..0ef8ae14e 100644 --- a/weed/topology/data_node.go +++ b/weed/topology/data_node.go @@ -15,7 +15,6 @@ type DataNode struct { Port int PublicUrl string LastSeen int64 // unix time in seconds - Dead bool } func NewDataNode(id string) *DataNode { @@ -30,7 +29,7 @@ func NewDataNode(id string) *DataNode { func (dn *DataNode) String() string { dn.RLock() defer dn.RUnlock() - return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s, Dead:%v", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl, dn.Dead) + return fmt.Sprintf("Node:%s, volumes:%v, Ip:%s, Port:%d, PublicUrl:%s", dn.NodeImpl.String(), dn.volumes, dn.Ip, dn.Port, dn.PublicUrl) } func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) { diff --git a/weed/topology/node.go b/weed/topology/node.go index 4ce35f4b0..7383f9576 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -242,12 +242,6 @@ func (n *NodeImpl) CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSi if n.IsRack() { for _, c := range n.Children() { dn := c.(*DataNode) //can not cast n to DataNode - if dn.LastSeen < freshThreshHold { - if !dn.Dead { - dn.Dead = true - n.GetTopology().chanDeadDataNodes <- dn - } - } for _, v := range dn.GetVolumes() { if uint64(v.Size) >= volumeSizeLimit { //fmt.Println("volume",v.Id,"size",v.Size,">",volumeSizeLimit) diff --git a/weed/topology/rack.go b/weed/topology/rack.go index 1ca2f8de8..a48d64323 100644 --- a/weed/topology/rack.go +++ b/weed/topology/rack.go @@ -32,11 +32,6 @@ func (r 
*Rack) GetOrCreateDataNode(ip string, port int, publicUrl string, maxVol dn := c.(*DataNode) if dn.MatchLocation(ip, port) { dn.LastSeen = time.Now().Unix() - if dn.Dead { - dn.Dead = false - r.GetTopology().chanRecoveredDataNodes <- dn - dn.UpAdjustMaxVolumeCountDelta(maxVolumeCount - dn.maxVolumeCount) - } return dn } } diff --git a/weed/topology/topology.go b/weed/topology/topology.go index 04b500053..ffd32ae21 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -7,7 +7,6 @@ import ( "github.com/chrislusf/raft" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/sequence" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" @@ -24,11 +23,9 @@ type Topology struct { Sequence sequence.Sequencer - chanDeadDataNodes chan *DataNode - chanRecoveredDataNodes chan *DataNode - chanFullVolumes chan storage.VolumeInfo + chanFullVolumes chan storage.VolumeInfo - configuration *Configuration + Configuration *Configuration RaftServer raft.Server } @@ -45,8 +42,6 @@ func NewTopology(id string, confFile string, seq sequence.Sequencer, volumeSizeL t.Sequence = seq - t.chanDeadDataNodes = make(chan *DataNode) - t.chanRecoveredDataNodes = make(chan *DataNode) t.chanFullVolumes = make(chan storage.VolumeInfo) err := t.loadConfiguration(confFile) @@ -80,7 +75,7 @@ func (t *Topology) Leader() (string, error) { func (t *Topology) loadConfiguration(configurationFile string) error { b, e := ioutil.ReadFile(configurationFile) if e == nil { - t.configuration, e = NewConfiguration(b) + t.Configuration, e = NewConfiguration(b) return e } glog.V(0).Infoln("Using default configurations.") @@ -147,35 +142,6 @@ func (t *Topology) UnRegisterVolumeLayout(v storage.VolumeInfo, dn *DataNode) { t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl).UnRegisterVolume(&v, dn) } -func (t *Topology) ProcessJoinMessage(joinMessage *operation.JoinMessage) { - t.Sequence.SetMax(*joinMessage.MaxFileKey) - dcName, rackName := t.configuration.Locate(*joinMessage.Ip, *joinMessage.DataCenter, *joinMessage.Rack) - dc := t.GetOrCreateDataCenter(dcName) - rack := dc.GetOrCreateRack(rackName) - dn := rack.FindDataNode(*joinMessage.Ip, int(*joinMessage.Port)) - if *joinMessage.IsInit && dn != nil { - t.UnRegisterDataNode(dn) - } - dn = rack.GetOrCreateDataNode(*joinMessage.Ip, - int(*joinMessage.Port), *joinMessage.PublicUrl, - int(*joinMessage.MaxVolumeCount)) - var volumeInfos []storage.VolumeInfo - for _, v := range joinMessage.Volumes { - if vi, err := storage.NewVolumeInfo(v); err == nil { - volumeInfos = append(volumeInfos, vi) - } else { - glog.V(0).Infoln("Fail to convert joined volume information:", err.Error()) - } - } - deletedVolumes := dn.UpdateVolumes(volumeInfos) - for _, v := range volumeInfos { - t.RegisterVolumeLayout(v, dn) - } - for _, v := range deletedVolumes { - t.UnRegisterVolumeLayout(v, dn) - } -} - func (t *Topology) GetOrCreateDataCenter(dcName string) *DataCenter { for _, c := range t.Children() { dc := c.(*DataCenter) diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index 476aaf4d8..40019fdcd 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -31,12 +31,6 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold string) { select { case v := <-t.chanFullVolumes: t.SetVolumeCapacityFull(v) - case dn := <-t.chanRecoveredDataNodes: - t.RegisterRecoveredDataNode(dn) - 
glog.V(0).Infoln("Recovered DataNode: %v", dn) - case dn := <-t.chanDeadDataNodes: - t.UnRegisterDataNode(dn) - glog.V(0).Infof("Dead DataNode: %v", dn) } } }() @@ -64,11 +58,3 @@ func (t *Topology) UnRegisterDataNode(dn *DataNode) { dn.UpAdjustMaxVolumeCountDelta(-dn.GetMaxVolumeCount()) dn.Parent().UnlinkChildNode(dn.Id()) } -func (t *Topology) RegisterRecoveredDataNode(dn *DataNode) { - for _, v := range dn.GetVolumes() { - vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl) - if vl.isWritable(&v) { - vl.SetVolumeAvailable(dn, v.Id) - } - } -} From e767c3ea4fd04a9cb01ec6698b4582663bd98bb6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Jan 2017 01:30:00 -0800 Subject: [PATCH 58/61] disable master connection timeout temporarily disable master connection timeout due to heartbeat connection timeout --- weed/command/master.go | 14 +++++++------- weed/command/server.go | 2 +- weed/server/volume_grpc_client.go | 20 +++++++++++++++++--- weed/topology/node.go | 2 +- weed/topology/topology_event_handling.go | 2 +- weed/util/net_timeout.go | 16 ++++++++++------ 6 files changed, 37 insertions(+), 19 deletions(-) diff --git a/weed/command/master.go b/weed/command/master.go index eee22810b..aed92fa33 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -43,12 +43,12 @@ var ( mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") confFile = cmdMaster.Flag.String("conf", "/etc/weedfs/weedfs.conf", "Deprecating! xml configuration file") defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") - mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds") - mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - garbageThreshold = cmdMaster.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces") - masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") - masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") - masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") + // mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds") + mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") + garbageThreshold = cmdMaster.Flag.String("garbageThreshold", "0.3", "threshold to vacuum and reclaim spaces") + masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") + masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") masterWhiteList []string ) @@ -87,7 +87,7 @@ func runMaster(cmd *Command, args []string) bool { glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", listeningAddress) - listener, e := util.NewListener(listeningAddress, time.Duration(*mTimeout)*time.Second) + listener, e := util.NewListener(listeningAddress, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } diff --git a/weed/command/server.go b/weed/command/server.go index 5bde22517..b4b98ba1e 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -215,7 +215,7 @@ func runServer(cmd *Command, args []string) bool { ) glog.V(0).Infoln("Start Seaweed Master", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*masterPort)) - masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), time.Duration(*serverTimeout)*time.Second) + masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } diff --git a/weed/server/volume_grpc_client.go b/weed/server/volume_grpc_client.go index 54e2c2f75..ac3871c8c 100644 --- a/weed/server/volume_grpc_client.go +++ b/weed/server/volume_grpc_client.go @@ -53,10 +53,13 @@ func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error { vs.store.Client = stream defer func() { vs.store.Client = nil }() + doneChan := make(chan error, 1) + go func() { for { in, err := stream.Recv() if err != nil { + doneChan <- err return } vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() @@ -64,11 +67,22 @@ func (vs *VolumeServer) doHeartbeat(sleepInterval time.Duration) error { } }() + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return err + } + + tickChan := time.NewTimer(sleepInterval).C + for { - if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { - glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + select { + case <-tickChan: + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return err + } + case err := <-doneChan: return err } - time.Sleep(sleepInterval) } } diff --git a/weed/topology/node.go b/weed/topology/node.go index 7383f9576..206a9aff4 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -234,7 +234,7 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) { n.UpAdjustVolumeCountDelta(-node.GetVolumeCount()) n.UpAdjustActiveVolumeCountDelta(-node.GetActiveVolumeCount()) n.UpAdjustMaxVolumeCountDelta(-node.GetMaxVolumeCount()) - glog.V(0).Infoln(n, "removes", node, "volumeCount =", n.activeVolumeCount) + glog.V(0).Infoln(n, "removes", node.Id()) } } diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go index 40019fdcd..e2dcfca06 100644 --- a/weed/topology/topology_event_handling.go +++ b/weed/topology/topology_event_handling.go @@ -49,7 +49,7 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool { } func (t *Topology) UnRegisterDataNode(dn *DataNode) { for _, v := range dn.GetVolumes() { - glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn) + glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", 
dn.Id()) vl := t.GetVolumeLayout(v.Collection, v.ReplicaPlacement, v.Ttl) vl.SetVolumeUnavailable(dn, v.Id) } diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index f46776992..8acd50d42 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -38,9 +38,11 @@ type Conn struct { } func (c *Conn) Read(b []byte) (count int, e error) { - err := c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout)) - if err != nil { - return 0, err + if c.ReadTimeout != 0 { + err := c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout)) + if err != nil { + return 0, err + } } count, e = c.Conn.Read(b) if e == nil { @@ -50,9 +52,11 @@ func (c *Conn) Read(b []byte) (count int, e error) { } func (c *Conn) Write(b []byte) (count int, e error) { - err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) - if err != nil { - return 0, err + if c.WriteTimeout != 0 { + err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) + if err != nil { + return 0, err + } } count, e = c.Conn.Write(b) if e == nil { From adc8d344a524a093dda867058ef852c4ea4d7e76 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Jan 2017 23:57:16 -0800 Subject: [PATCH 59/61] update proto buffer --- weed/pb/seaweed.pb.go | 101 +++++++++++++++----------------- weed/pb/seaweed.proto | 17 +++--- weed/proto/Makefile | 4 -- weed/proto/system_message.proto | 27 --------- 4 files changed, 54 insertions(+), 95 deletions(-) delete mode 100644 weed/proto/Makefile delete mode 100644 weed/proto/system_message.proto diff --git a/weed/pb/seaweed.pb.go b/weed/pb/seaweed.pb.go index 02de2d8a6..19bbb39d8 100644 --- a/weed/pb/seaweed.pb.go +++ b/weed/pb/seaweed.pb.go @@ -36,16 +36,15 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Heartbeat struct { - IsInit bool `protobuf:"varint,1,opt,name=is_init,json=isInit" json:"is_init,omitempty"` - Ip string `protobuf:"bytes,2,opt,name=ip" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port" json:"port,omitempty"` - PublicUrl string `protobuf:"bytes,4,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - MaxVolumeCount uint32 `protobuf:"varint,5,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - MaxFileKey uint64 `protobuf:"varint,6,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` - DataCenter string `protobuf:"bytes,7,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,8,opt,name=rack" json:"rack,omitempty"` + Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` + MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` + MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"` + AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` - AdminPort uint32 `protobuf:"varint,10,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` } func (m *Heartbeat) Reset() { *m = Heartbeat{} } 
@@ -53,13 +52,6 @@ func (m *Heartbeat) String() string { return proto.CompactTextString( func (*Heartbeat) ProtoMessage() {} func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Heartbeat) GetIsInit() bool { - if m != nil { - return m.IsInit - } - return false -} - func (m *Heartbeat) GetIp() string { if m != nil { return m.Ip @@ -109,18 +101,18 @@ func (m *Heartbeat) GetRack() string { return "" } -func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { +func (m *Heartbeat) GetAdminPort() uint32 { if m != nil { - return m.Volumes + return m.AdminPort } - return nil + return 0 } -func (m *Heartbeat) GetAdminPort() uint32 { +func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { if m != nil { - return m.AdminPort + return m.Volumes } - return 0 + return nil } type HeartbeatResponse struct { @@ -348,37 +340,36 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("seaweed.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 511 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0x41, 0x6f, 0xd3, 0x4c, - 0x10, 0x86, 0x3f, 0x3b, 0xfe, 0x92, 0x78, 0x52, 0x97, 0x74, 0x25, 0x84, 0x05, 0x05, 0x4c, 0x4e, - 0x96, 0x40, 0x11, 0x2a, 0x12, 0x17, 0x6e, 0x54, 0xaa, 0xa8, 0x0a, 0xa2, 0xda, 0x08, 0x2e, 0x1c, - 0xac, 0xb5, 0x3d, 0x45, 0xab, 0xae, 0xd7, 0xd6, 0x7a, 0x53, 0xe2, 0xfe, 0x39, 0x2e, 0xfc, 0x30, - 0xb4, 0xb3, 0x49, 0x5a, 0x90, 0xb8, 0xcd, 0x3c, 0xfb, 0x8e, 0x77, 0x67, 0xde, 0x31, 0x24, 0x3d, - 0x8a, 0x1f, 0x88, 0xf5, 0xb2, 0x33, 0xad, 0x6d, 0x59, 0xd8, 0x95, 0x8b, 0x9f, 0x21, 0xc4, 0x1f, - 0x50, 0x18, 0x5b, 0xa2, 0xb0, 0xec, 0x11, 0x4c, 0x64, 0x5f, 0x48, 0x2d, 0x6d, 0x1a, 0x64, 0x41, - 0x3e, 0xe5, 0x63, 0xd9, 0x9f, 0x6b, 0x69, 0xd9, 0x21, 0x84, 0xb2, 0x4b, 0xc3, 0x2c, 0xc8, 0x63, - 0x1e, 0xca, 0x8e, 0x31, 0x88, 0xba, 0xd6, 0xd8, 0x74, 0x94, 0x05, 0x79, 0xc2, 0x29, 0x66, 0x4f, - 0x01, 0xba, 0x75, 0xa9, 0x64, 0x55, 0xac, 0x8d, 0x4a, 0x23, 0xd2, 0xc6, 0x9e, 0x7c, 0x31, 0x8a, - 0xe5, 0x30, 0x6f, 0xc4, 0xa6, 0xb8, 0x69, 0xd5, 0xba, 0xc1, 0xa2, 0x6a, 0xd7, 0xda, 0xa6, 0xff, - 0x53, 0xf9, 0x61, 0x23, 0x36, 0x5f, 0x09, 0x9f, 0x3a, 0xca, 0x32, 0x38, 0x70, 0xca, 0x2b, 0xa9, - 0xb0, 0xb8, 0xc6, 0x21, 0x1d, 0x67, 0x41, 0x1e, 0x71, 0x68, 0xc4, 0xe6, 0x4c, 0x2a, 0xbc, 0xc0, - 0x81, 0x3d, 0x87, 0x59, 0x2d, 0xac, 0x28, 0x2a, 0xd4, 0x16, 0x4d, 0x3a, 0xa1, 0xbb, 0xc0, 0xa1, - 0x53, 0x22, 0xee, 0x7d, 0x46, 0x54, 0xd7, 0xe9, 0x94, 0x4e, 0x28, 0x66, 0x6f, 0x61, 0xe2, 0x2f, - 0xef, 0xd3, 0x38, 0x1b, 0xe5, 0xb3, 0x93, 0xe3, 0x65, 0x57, 0x2e, 0xfd, 0xc5, 0xe7, 0xfa, 0xaa, - 0x35, 0x8d, 0xb0, 0xb2, 0xd5, 0x9f, 0xb0, 0xef, 0xc5, 0x77, 0xe4, 0x3b, 0xb1, 0xeb, 0x4b, 0xd4, - 0x8d, 0xd4, 0x05, 0x75, 0x0c, 0xf4, 0xe4, 0x98, 0xc8, 0x65, 0x6b, 0xec, 0xe2, 0x1b, 0x1c, 0xed, - 0x07, 0xc8, 0xb1, 0xef, 0x5a, 0xdd, 0x23, 0xcb, 0xe1, 0x81, 0x2f, 0x5f, 0xc9, 0x5b, 0xfc, 0x28, - 0x9b, 0xed, 0x40, 0x23, 0xfe, 0x37, 0x66, 0xc7, 0x10, 0xf7, 0x58, 0x19, 0xb4, 0x17, 0x38, 0x6c, - 0x07, 0x7c, 0x07, 0x16, 0xbf, 0x42, 0x48, 0xff, 0xf5, 0x42, 0x32, 0xa5, 0xa6, 0xef, 0x26, 0x3c, - 0x94, 0xb5, 0x6b, 0xba, 0x97, 0xb7, 0x48, 0x5f, 0x89, 0x38, 0xc5, 0xec, 0x19, 0x40, 0xd5, 0x2a, - 0x85, 0x95, 0x2b, 0x24, 0xbb, 0x62, 0x7e, 0x8f, 0xb8, 0xe6, 0x68, 0xce, 0xde, 0x8f, 0x88, 0x2a, - 0x63, 0x47, 0xbc, 0x15, 0x2f, 0xe0, 0xa0, 0x46, 0x85, 0xf6, 0xbe, 0x61, 0x11, 0x9f, 0x79, 0xe6, - 0x25, 0xaf, 0x80, 0xf9, 0xb4, 0x2e, 0xca, 0x61, 0x2f, 0xf4, 0x9e, 0xcd, 0xb7, 0x27, 0xef, 0x87, - 0x9d, 0xfa, 0x09, 0xc4, 0x06, 
0x45, 0x5d, 0xb4, 0x5a, 0x0d, 0xe4, 0xdb, 0x94, 0x4f, 0x1d, 0xf8, - 0xac, 0xd5, 0xc0, 0x5e, 0xc2, 0x91, 0xc1, 0x4e, 0xc9, 0x4a, 0x14, 0x9d, 0x12, 0x15, 0x36, 0xa8, - 0x2d, 0x59, 0x98, 0xf0, 0xf9, 0xf6, 0xe0, 0x72, 0xc7, 0x59, 0x0a, 0x93, 0x1b, 0x34, 0xbd, 0x6b, - 0x2b, 0x26, 0xc9, 0x2e, 0x65, 0x73, 0x18, 0x59, 0xab, 0xb6, 0x4e, 0xb9, 0xf0, 0xe4, 0x0c, 0x26, - 0x2b, 0xbf, 0xfa, 0xec, 0x1d, 0x24, 0x2b, 0xd4, 0xf5, 0xdd, 0xce, 0x27, 0x6e, 0x0b, 0xf6, 0xe9, - 0xe3, 0x87, 0x7f, 0xa4, 0x3b, 0x43, 0x17, 0xff, 0xe5, 0xc1, 0xeb, 0xa0, 0x1c, 0xd3, 0x8f, 0xf3, - 0xe6, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0xc1, 0xd3, 0x35, 0x49, 0x03, 0x00, 0x00, + // 489 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x4d, 0x1a, 0xdb, 0xe6, 0x74, 0xbb, 0x76, 0x07, 0x84, 0x41, 0x57, 0x8d, 0xbd, 0x0a, + 0x28, 0x45, 0x56, 0xf0, 0xc6, 0x3b, 0x17, 0x16, 0x65, 0x15, 0x97, 0x29, 0x7a, 0xe3, 0x45, 0x98, + 0x24, 0x67, 0x65, 0xd8, 0xc9, 0x1f, 0x26, 0xd3, 0xb5, 0xd9, 0x07, 0xf2, 0x49, 0x7c, 0x30, 0x99, + 0x33, 0x4d, 0xab, 0x82, 0x77, 0xe7, 0xfc, 0xce, 0x97, 0xe4, 0x9b, 0xf3, 0x4d, 0x60, 0xde, 0xa1, + 0xfc, 0x81, 0x58, 0xae, 0x5a, 0xd3, 0xd8, 0x86, 0x85, 0x6d, 0xbe, 0xfc, 0x19, 0x42, 0xfc, 0x1e, + 0xa5, 0xb1, 0x39, 0x4a, 0xcb, 0x8e, 0x21, 0x54, 0x2d, 0x0f, 0x92, 0x20, 0x8d, 0x45, 0xa8, 0x5a, + 0xc6, 0x20, 0x6a, 0x1b, 0x63, 0x79, 0x98, 0x04, 0xe9, 0x5c, 0x50, 0xcd, 0x9e, 0x00, 0xb4, 0x9b, + 0x5c, 0xab, 0x22, 0xdb, 0x18, 0xcd, 0x47, 0xa4, 0x8d, 0x3d, 0xf9, 0x62, 0x34, 0x4b, 0x61, 0x51, + 0xc9, 0x6d, 0x76, 0xdb, 0xe8, 0x4d, 0x85, 0x59, 0xd1, 0x6c, 0x6a, 0xcb, 0x23, 0x7a, 0xfc, 0xb8, + 0x92, 0xdb, 0xaf, 0x84, 0xcf, 0x1d, 0x65, 0x09, 0x1c, 0x39, 0xe5, 0xb5, 0xd2, 0x98, 0xdd, 0x60, + 0xcf, 0xef, 0x27, 0x41, 0x1a, 0x09, 0xa8, 0xe4, 0xf6, 0x42, 0x69, 0xbc, 0xc4, 0x9e, 0x3d, 0x83, + 0x59, 0x29, 0xad, 0xcc, 0x0a, 0xac, 0x2d, 0x1a, 0x3e, 0xa6, 0x6f, 0x81, 0x43, 0xe7, 0x44, 0x9c, + 0x3f, 0x23, 0x8b, 0x1b, 0x3e, 0xa1, 0x09, 0xd5, 0xce, 0x9f, 0x2c, 0x2b, 0x55, 0x67, 0xe4, 0x7c, + 0x4a, 0x9f, 0x8e, 0x89, 0x5c, 0x39, 0xfb, 0x6f, 0x60, 0xe2, 0xbd, 0x75, 0x3c, 0x4e, 0x46, 0xe9, + 0xec, 0xec, 0x74, 0xd5, 0xe6, 0x2b, 0xef, 0xeb, 0x43, 0x7d, 0xdd, 0x98, 0x4a, 0x5a, 0xd5, 0xd4, + 0x9f, 0xb0, 0xeb, 0xe4, 0x77, 0x14, 0x83, 0x78, 0xf9, 0x0d, 0x4e, 0xf6, 0x7b, 0x12, 0xd8, 0xb5, + 0x4d, 0xdd, 0x21, 0x4b, 0xe1, 0x81, 0x9f, 0xaf, 0xd5, 0x1d, 0x7e, 0x54, 0x95, 0xb2, 0xb4, 0xbc, + 0x48, 0xfc, 0x8b, 0xd9, 0x29, 0xc4, 0x1d, 0x16, 0x06, 0xed, 0x25, 0xf6, 0xb4, 0xce, 0x58, 0x1c, + 0xc0, 0xf2, 0x57, 0x08, 0xfc, 0x7f, 0x16, 0x28, 0x94, 0x92, 0xde, 0x3b, 0x17, 0xa1, 0x2a, 0xdd, + 0xa1, 0x3b, 0x75, 0x87, 0xf4, 0x96, 0x48, 0x50, 0xcd, 0x9e, 0x02, 0x14, 0x8d, 0xd6, 0x58, 0xb8, + 0x07, 0x77, 0xa1, 0xfc, 0x41, 0xdc, 0x52, 0x68, 0xcf, 0x87, 0x3c, 0x22, 0x11, 0x3b, 0xe2, 0xa3, + 0x78, 0x0e, 0x47, 0x25, 0x6a, 0xb4, 0x83, 0xc0, 0x47, 0x31, 0xf3, 0xcc, 0x4b, 0x5e, 0x02, 0xf3, + 0x6d, 0x99, 0xe5, 0xfd, 0x5e, 0x38, 0x26, 0xe1, 0x62, 0x37, 0x79, 0xd7, 0x0f, 0xea, 0xc7, 0x10, + 0x1b, 0x94, 0x65, 0xd6, 0xd4, 0xba, 0xa7, 0x74, 0xa6, 0x62, 0xea, 0xc0, 0xe7, 0x5a, 0xf7, 0xec, + 0x05, 0x9c, 0x18, 0x6c, 0xb5, 0x2a, 0x64, 0xd6, 0x6a, 0x59, 0x60, 0x85, 0xf5, 0x10, 0xd4, 0x62, + 0x37, 0xb8, 0x1a, 0x38, 0xe3, 0x30, 0xb9, 0x45, 0xd3, 0xb9, 0x63, 0xc5, 0x24, 0x19, 0x5a, 0xb6, + 0x80, 0x91, 0xb5, 0x9a, 0x03, 0x51, 0x57, 0x9e, 0x5d, 0xc0, 0x64, 0xed, 0x6f, 0x38, 0x7b, 0x0b, + 0xf3, 0x35, 0xd6, 0xe5, 0xe1, 0x6a, 0xcf, 0x5d, 0xcc, 0xfb, 0xf6, 0xd1, 0xc3, 0xbf, 0xda, 0x21, + 0xd0, 0xe5, 
0xbd, 0x34, 0x78, 0x15, 0xe4, 0x63, 0xfa, 0x3f, 0x5e, 0xff, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0xd5, 0x08, 0xa6, 0xf2, 0x30, 0x03, 0x00, 0x00, } diff --git a/weed/pb/seaweed.proto b/weed/pb/seaweed.proto index 2dc8343a2..b796b9c26 100644 --- a/weed/pb/seaweed.proto +++ b/weed/pb/seaweed.proto @@ -11,16 +11,15 @@ service Seaweed { ////////////////////////////////////////////////// message Heartbeat { - bool is_init = 1; - string ip = 2; - uint32 port = 3; - string public_url = 4; - uint32 max_volume_count = 5; - uint64 max_file_key = 6; - string data_center = 7; - string rack = 8; + string ip = 1; + uint32 port = 2; + string public_url = 3; + uint32 max_volume_count = 4; + uint64 max_file_key = 5; + string data_center = 6; + string rack = 7; + uint32 admin_port = 8; repeated VolumeInformationMessage volumes = 9; - uint32 admin_port = 10; } message HeartbeatResponse { uint64 volumeSizeLimit = 1; diff --git a/weed/proto/Makefile b/weed/proto/Makefile deleted file mode 100644 index 73af851dd..000000000 --- a/weed/proto/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -TARG=../operation - -all: - protoc --go_out=$(TARG) system_message.proto diff --git a/weed/proto/system_message.proto b/weed/proto/system_message.proto deleted file mode 100644 index 548360b27..000000000 --- a/weed/proto/system_message.proto +++ /dev/null @@ -1,27 +0,0 @@ -package operation; - -message VolumeInformationMessage { - required uint32 id = 1; - required uint64 size = 2; - optional string collection = 3; - required uint64 file_count = 4; - required uint64 delete_count = 5; - required uint64 deleted_byte_count = 6; - optional bool read_only = 7; - required uint32 replica_placement = 8; - optional uint32 version = 9 [default=2]; - optional uint32 ttl = 10; -} - -message JoinMessage { - optional bool is_init = 1; - required string ip = 2; - required uint32 port = 3; - optional string public_url = 4; - required uint32 max_volume_count = 5; - required uint64 max_file_key = 6; - optional string data_center = 7; - optional string rack = 8; - repeated VolumeInformationMessage volumes = 9; - optional uint32 admin_port = 10; -} From 1be354bdce0c48636807bb98b94504fc51dd73f9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Jan 2017 23:57:59 -0800 Subject: [PATCH 60/61] default IP to "localhost" so that the master can know the ip address. 
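With -ip left at its previous empty default, the volume server sent an empty Heartbeat.Ip and the master had no address to reach it at. Defaulting to "localhost" gives the master a resolvable name, though only for single-host deployments; the follow-up patch moves to master-side detection instead. The fallback this default implies, roughly (a sketch, assuming the v.ip flag variable wired up in the diff below):

    ip := *v.ip
    if ip == "" {
        ip = "localhost" // this patch's default; see the next patch for auto-detection
    }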
--- weed/command/volume.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/volume.go b/weed/command/volume.go index 0e69325b6..a767356e3 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -43,7 +43,7 @@ func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", "", "ip or server name") + v.ip = cmdVolume.Flag.String("ip", "localhost", "ip or server name") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") v.master = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location") From d953ed442cb027393399e2c5507e6de45c2c0923 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 12 Jan 2017 13:42:53 -0800 Subject: [PATCH 61/61] auto detect volume server ip address --- weed/command/volume.go | 2 +- weed/server/master_grpc_server.go | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/weed/command/volume.go b/weed/command/volume.go index a767356e3..0e69325b6 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -43,7 +43,7 @@ func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", "localhost", "ip or server name") + v.ip = cmdVolume.Flag.String("ip", "", "ip or server name") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") v.master = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location") diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 29c95a3d4..b5cbd85b7 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -1,10 +1,14 @@ package weed_server import ( + "net" + "strings" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/topology" + "google.golang.org/grpc/peer" ) func (ms MasterServer) SendHeartbeat(stream pb.Seaweed_SendHeartbeatServer) error { @@ -15,6 +19,14 @@ func (ms MasterServer) SendHeartbeat(stream pb.Seaweed_SendHeartbeatServer) erro if err == nil { if dn == nil { t.Sequence.SetMax(heartbeat.MaxFileKey) + if heartbeat.Ip == "" { + if pr, ok := peer.FromContext(stream.Context()); ok { + if pr.Addr != net.Addr(nil) { + heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")] + glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip) + } + } + } dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) dc := t.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName)
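The detection added above takes the gRPC peer address ("host:port") and slices off everything from the last colon. A sketch of an equivalent formulation using the standard library (same result for IPv4; for IPv6 literals such as "[::1]:8080", net.SplitHostPort additionally strips the brackets):

    if pr, ok := peer.FromContext(stream.Context()); ok && pr.Addr != nil {
        if host, _, err := net.SplitHostPort(pr.Addr.String()); err == nil {
            heartbeat.Ip = host // detected remote address of the volume server
        }
    }

This keeps the volume server's -ip default empty and lets the master fill in the address only when the heartbeat does not already carry one.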