
Delete all chunks when deleting a ChunkManifest

LoadChunkManifest can uncompress the buffer
Move compress.go from storage to operation because of an import cycle
Makefile: add cross-compile command
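
Taken together, the delete path for a chunked file now works roughly as in the sketch below. This is a minimal illustration, not the handler itself: deleteChunksFirst is a hypothetical helper, it assumes the master address is the string returned by VolumeServer.GetMasterNode(), and the caller still deletes the manifest needle afterwards through the normal replicated delete.

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/go/operation"
	"github.com/chrislusf/seaweedfs/go/storage"
)

// deleteChunksFirst parses the chunk manifest stored in a needle, ungzipping
// the data when the needle is gzipped, and deletes every referenced chunk
// through the master before the manifest needle itself is removed.
func deleteChunksFirst(n *storage.Needle, masterNode string) error {
	if !n.IsChunkedManifest() {
		return nil // plain needle, nothing extra to clean up
	}
	cm, err := operation.LoadChunkManifest(n.Data, n.IsGzipped())
	if err != nil {
		return fmt.Errorf("load chunk manifest: %v", err)
	}
	if err := cm.DeleteChunks(masterNode); err != nil {
		return fmt.Errorf("delete chunks: %v", err)
	}
	return nil
}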
pull/224/head
tnextday 9 years ago
commit 662915e691
7 changed files:
  1. .gitignore (1 changed line)
  2. Makefile (25 changed lines)
  3. go/operation/chunked_file.go (16 changed lines)
  4. go/operation/compress.go (2 changed lines)
  5. go/storage/needle.go (5 changed lines)
  6. go/weed/weed_server/volume_server_handlers_read.go (11 changed lines)
  7. go/weed/weed_server/volume_server_handlers_write.go (11 changed lines)

1
.gitignore

@@ -74,3 +74,4 @@ com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
test_data

25
Makefile

@@ -1,11 +1,22 @@
-.clean:
-	go clean -i -v ./go/weed/
-.deps:
-	go get -d ./go/weed/
-.build: .deps
-	go build -v ./go/weed/
-all: .build
+BINARY = weed
+GO_FLAGS = #-v
+SOURCE_DIR = ./go/weed/
+all: build
+.PHONY : clean deps build linux
+clean:
+	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
+	rm -f $(BINARY)
+deps:
+	go get $(GO_FLAGS) -d $(SOURCE_DIR)
+build: deps
+	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+linux: deps
+	mkdir -p linux
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)
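
With these targets, make build fetches dependencies and produces the native weed binary in the repository root, while make linux cross-compiles an amd64 Linux binary into linux/weed by overriding GOOS and GOARCH; GO_FLAGS is a hook for extra go tool flags such as -v.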

16
go/operation/chunked_file.go

@@ -30,10 +30,10 @@ type ChunkInfo struct {
type ChunkList []*ChunkInfo
type ChunkManifest struct {
-	Name string `json:"name,omitempty"`
-	Mime string `json:"mime,omitempty"`
-	Size int64 `json:"size,omitempty"`
-	Chunks ChunkList `json:"chunks,omitempty"`
+	Name   string    `json:"name,omitempty"`
+	Mime   string    `json:"mime,omitempty"`
+	Size   int64     `json:"size,omitempty"`
+	Chunks ChunkList `json:"chunks,omitempty"`
}
// seekable chunked file reader
@@ -50,7 +50,13 @@ func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func LoadChunkedManifest(buffer []byte) (*ChunkManifest, error) {
+func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
+	if isGzipped {
+		var err error
+		if buffer, err = UnGzipData(buffer); err != nil {
+			return nil, err
+		}
+	}
	cm := ChunkManifest{}
	if e := json.Unmarshal(buffer, &cm); e != nil {
		return nil, e
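
As a rough usage sketch of the new signature, the round trip below is a hypothetical test-style snippet; it assumes the GzipData helper that now lives in this package alongside UnGzipData.

package operation_test

import (
	"encoding/json"
	"testing"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func TestLoadChunkManifestGzipped(t *testing.T) {
	// Build a small manifest, marshal it, and gzip the bytes.
	cm := operation.ChunkManifest{Name: "movie.mp4", Size: 12345}
	raw, err := json.Marshal(cm)
	if err != nil {
		t.Fatal(err)
	}
	gzipped, err := operation.GzipData(raw)
	if err != nil {
		t.Fatal(err)
	}
	// LoadChunkManifest should transparently ungzip before unmarshalling.
	loaded, err := operation.LoadChunkManifest(gzipped, true)
	if err != nil {
		t.Fatal(err)
	}
	if loaded.Name != cm.Name || loaded.Size != cm.Size {
		t.Fatalf("round trip mismatch: %+v", loaded)
	}
}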

2
go/storage/compress.go → go/operation/compress.go

@@ -1,4 +1,4 @@
-package storage
+package operation
import (
	"bytes"

5
go/storage/needle.go

@@ -15,6 +15,7 @@ import (
	"github.com/chrislusf/seaweedfs/go/glog"
	"github.com/chrislusf/seaweedfs/go/images"
	"github.com/chrislusf/seaweedfs/go/util"
+	"github.com/chrislusf/seaweedfs/go/operation"
)
const (
@@ -117,8 +118,8 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
	}
	if part.Header.Get("Content-Encoding") == "gzip" {
		isGzipped = true
-	} else if IsGzippable(ext, mtype) {
-		if data, e = GzipData(data); e != nil {
+	} else if operation.IsGzippable(ext, mtype) {
+		if data, e = operation.GzipData(data); e != nil {
			return
		}
		isGzipped = true

11
go/weed/weed_server/volume_server_handlers_read.go

@@ -114,7 +114,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
	if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		w.Header().Set("Content-Encoding", "gzip")
	} else {
-		if n.Data, err = storage.UnGzipData(n.Data); err != nil {
+		if n.Data, err = operation.UnGzipData(n.Data); err != nil {
			glog.V(0).Infoln("lookup error:", err, r.URL.Path)
		}
	}
@@ -230,15 +230,8 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string,
		return false
	}
	processed = true
-	if n.IsGzipped(){
-		var err error
-		if n.Data, err = storage.UnGzipData(n.Data); err != nil {
-			glog.V(0).Infoln("ungzip data error:", err, r.URL.Path)
-			return false
-		}
-	}
-	chunkManifest, e := operation.LoadChunkedManifest(n.Data)
+	chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
	if e != nil {
		glog.V(0).Infoln("load chunked manifest error:", e)
		return false

11
go/weed/weed_server/volume_server_handlers_write.go

@@ -66,6 +66,17 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
		glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
		return
	}
+	if n.IsChunkedManifest(){
+		chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
+		if e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Load chunks manifest error: " + e.Error()))
+			return
+		}
+		if e := chunkManifest.DeleteChunks(vs.GetMasterNode()); e != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, errors.New("Delete chunks error: " + e.Error()))
+			return
+		}
+	}
	ret := topology.ReplicatedDelete(vs.GetMasterNode(), vs.store, volumeId, n, r)
