
Merge branch 'feature/chunked-file-support' into develop

pull/279/head
tnextday · 10 years ago
parent commit: b5a1b64255
13 changed files (changed-line counts in parentheses):

1. .gitignore (1)
2. Makefile (25)
3. go/operation/chunked_file.go (217)
4. go/operation/compress.go (2)
5. go/operation/delete_content.go (10)
6. go/operation/submit.go (48)
7. go/storage/needle.go (16)
8. go/storage/needle_read_write.go (9)
9. go/topology/volume_location_list.go (1)
10. go/util/http_util.go (16)
11. go/weed/weed_server/common.go (2)
12. go/weed/weed_server/volume_server_handlers_read.go (155)
13. go/weed/weed_server/volume_server_handlers_write.go (64)

.gitignore (1)

@@ -74,3 +74,4 @@ com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
test_data

Makefile (25)

@@ -1,11 +1,22 @@
+BINARY = weed
-.clean:
-	go clean -i -v ./go/weed/
+GO_FLAGS = #-v
+SOURCE_DIR = ./go/weed/
-.deps:
-	go get -d ./go/weed/
+all: build
-.build: .deps
-	go build -v ./go/weed/
+.PHONY : clean deps build linux
-all: .build
+clean:
+	go clean -i $(GO_FLAGS) $(SOURCE_DIR)
+	rm -f $(BINARY)
+deps:
+	go get $(GO_FLAGS) -d $(SOURCE_DIR)
+build: deps
+	go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR)
+linux: deps
+	mkdir -p linux
+	GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR)

go/operation/chunked_file.go (217)

@@ -0,0 +1,217 @@
package operation
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sort"
"sync"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/util"
)
var (
// when the remote server does not allow range requests (Accept-Ranges was not set)
ErrRangeRequestsNotSupported = errors.New("Range requests are not supported by the remote server")
// ErrInvalidRange is returned by Read when trying to read past the end of the file
ErrInvalidRange = errors.New("Invalid range")
)
type ChunkInfo struct {
Fid string `json:"fid"`
Offset int64 `json:"offset"`
Size int64 `json:"size"`
}
type ChunkList []*ChunkInfo
type ChunkManifest struct {
Name string `json:"name,omitempty"`
Mime string `json:"mime,omitempty"`
Size int64 `json:"size,omitempty"`
Chunks ChunkList `json:"chunks,omitempty"`
}
// seekable chunked file reader
type ChunkedFileReader struct {
Manifest *ChunkManifest
Master string
pos int64
pr *io.PipeReader
pw *io.PipeWriter
mutex sync.Mutex
}
func (s ChunkList) Len() int { return len(s) }
func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset }
func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) {
if isGzipped {
var err error
if buffer, err = UnGzipData(buffer); err != nil {
return nil, err
}
}
cm := ChunkManifest{}
if e := json.Unmarshal(buffer, &cm); e != nil {
return nil, e
}
sort.Sort(cm.Chunks)
return &cm, nil
}
func (cm *ChunkManifest) GetData() ([]byte, error) {
return json.Marshal(cm)
}
func (cm *ChunkManifest) DeleteChunks(master string) error {
deleteError := 0
for _, ci := range cm.Chunks {
if e := DeleteFile(master, ci.Fid, ""); e != nil {
deleteError++
glog.V(0).Infoln("delete error:", e, ci.Fid)
}
}
if deleteError > 0 {
return errors.New("Not all chunks deleted.")
}
return nil
}
//func (cm *ChunkManifest) StoredHelper() error {
// return nil
//}
func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, e error) {
req, err := http.NewRequest("GET", fileUrl, nil)
if err != nil {
return written, err
}
if offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
}
resp, err := util.Do(req)
if err != nil {
return written, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusRequestedRangeNotSatisfiable:
return written, ErrInvalidRange
case http.StatusOK:
if offset > 0 {
return written, ErrRangeRequestsNotSupported
}
case http.StatusPartialContent:
break
default:
return written, fmt.Errorf("Read chunk needle error: [%d] %s", resp.StatusCode, fileUrl)
}
return io.Copy(w, resp.Body)
}
func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) {
var err error
switch whence {
case 0:
case 1:
offset += cf.pos
case 2:
offset = cf.Manifest.Size - offset
}
if offset > cf.Manifest.Size {
err = ErrInvalidRange
}
if cf.pos != offset {
cf.Close()
}
cf.pos = offset
return cf.pos, err
}
func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) {
cm := cf.Manifest
chunkIndex := -1
chunkStartOffset := int64(0)
for i, ci := range cm.Chunks {
if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size {
chunkIndex = i
chunkStartOffset = cf.pos - ci.Offset
break
}
}
if chunkIndex < 0 {
return n, ErrInvalidRange
}
for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ {
ci := cm.Chunks[chunkIndex]
// should we read data from the local volume server first?
fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid)
if lookupError != nil {
return n, lookupError
}
if wn, e := readChunkNeedle(fileUrl, w, chunkStartOffset); e != nil {
return n, e
} else {
n += wn
cf.pos += wn
}
chunkStartOffset = 0
}
return n, nil
}
func (cf *ChunkedFileReader) ReadAt(p []byte, off int64) (n int, err error) {
cf.Seek(off, 0)
return cf.Read(p)
}
func (cf *ChunkedFileReader) Read(p []byte) (int, error) {
return cf.getPipeReader().Read(p)
}
func (cf *ChunkedFileReader) Close() (e error) {
cf.mutex.Lock()
defer cf.mutex.Unlock()
return cf.closePipe()
}
func (cf *ChunkedFileReader) closePipe() (e error) {
if cf.pr != nil {
if err := cf.pr.Close(); err != nil {
e = err
}
}
cf.pr = nil
if cf.pw != nil {
if err := cf.pw.Close(); err != nil {
e = err
}
}
cf.pw = nil
return e
}
func (cf *ChunkedFileReader) getPipeReader() io.Reader {
cf.mutex.Lock()
defer cf.mutex.Unlock()
if cf.pr != nil && cf.pw != nil {
return cf.pr
}
cf.closePipe()
cf.pr, cf.pw = io.Pipe()
go func(pw *io.PipeWriter) {
_, e := cf.WriteTo(pw)
pw.CloseWithError(e)
}(cf.pw)
return cf.pr
}
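To make the new reader concrete: a minimal usage sketch, assuming a master running at localhost:9333; the file ids, sizes, and file name below are hypothetical.

package main

import (
	"io"
	"os"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	// A manifest as it would be loaded from a manifest needle; the fids
	// and sizes are made up for illustration.
	manifest := &operation.ChunkManifest{
		Name: "movie.mp4",
		Size: 128 * 1024 * 1024,
		Chunks: operation.ChunkList{
			{Fid: "3,01637037d6", Offset: 0, Size: 64 * 1024 * 1024},
			{Fid: "4,02a32c1a57", Offset: 64 * 1024 * 1024, Size: 64 * 1024 * 1024},
		},
	}
	cf := &operation.ChunkedFileReader{
		Manifest: manifest,
		Master:   "localhost:9333", // assumed master address
	}
	defer cf.Close()
	// Seek into the second chunk, then stream the remainder; io.Copy takes
	// the WriteTo fast path, which issues one GET per chunk and adds a
	// Range header when starting mid-chunk.
	if _, err := cf.Seek(100*1024*1024, 0); err != nil {
		panic(err)
	}
	io.Copy(os.Stdout, cf)
}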

go/storage/compress.go → go/operation/compress.go (2)

@@ -1,4 +1,4 @@
-package storage
+package operation
import (
"bytes"

go/operation/delete_content.go (10)

@@ -7,6 +7,8 @@ import (
"strings"
"sync"
"net/http"
"github.com/chrislusf/seaweedfs/go/security"
"github.com/chrislusf/seaweedfs/go/util"
)
@@ -14,6 +16,7 @@ import (
type DeleteResult struct {
Fid string `json:"fid"`
Size int `json:"size"`
Status int `json:"status"`
Error string `json:"error,omitempty"`
}
@@ -45,7 +48,11 @@ func DeleteFiles(master string, fileIds []string) (*DeleteFilesResult, error) {
for _, fileId := range fileIds {
vid, _, err := ParseFileId(fileId)
if err != nil {
-ret.Results = append(ret.Results, DeleteResult{Fid: vid, Error: err.Error()})
+ret.Results = append(ret.Results, DeleteResult{
+	Fid:    vid,
+	Status: http.StatusBadRequest,
+	Error:  err.Error()},
+)
continue
}
if _, ok := vid_to_fileIds[vid]; !ok {
@@ -76,6 +83,7 @@ func DeleteFiles(master string, fileIds []string) (*DeleteFilesResult, error) {
}
var wg sync.WaitGroup
for server, fidList := range server_to_fileIds {
wg.Add(1)
go func(server string, fidList []string) {
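The new Status field gives callers a per-file HTTP status to inspect. A short sketch of reading it back; the master address is a placeholder and the file ids are made up:

package main

import (
	"fmt"
	"net/http"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	// "bad-fid" fails ParseFileId, so its result should carry
	// Status == http.StatusBadRequest; the other fid is hypothetical.
	result, err := operation.DeleteFiles("localhost:9333",
		[]string{"3,01637037d6", "bad-fid"})
	if err != nil {
		panic(err)
	}
	for _, r := range result.Results {
		if r.Status != http.StatusAccepted {
			fmt.Printf("delete %s -> %d: %s\n", r.Fid, r.Status, r.Error)
		}
	}
}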

go/operation/submit.go (48)

@@ -9,6 +9,8 @@ import (
"strconv"
"strings"
"net/url"
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/security"
)
@@ -114,25 +116,44 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret
if closer, ok := fi.Reader.(io.Closer); ok {
defer closer.Close()
}
baseName := path.Base(fi.FileName)
if maxMB > 0 && fi.FileSize > int64(maxMB*1024*1024) {
chunkSize := int64(maxMB * 1024 * 1024)
chunks := fi.FileSize/chunkSize + 1
-var fids []string
cm := ChunkManifest{
Name: baseName,
Size: fi.FileSize,
Mime: fi.MimeType,
Chunks: make([]*ChunkInfo, 0, chunks),
}
for i := int64(0); i < chunks; i++ {
id, count, e := upload_one_chunk(
-fi.FileName+"-"+strconv.FormatInt(i+1, 10),
+baseName+"-"+strconv.FormatInt(i+1, 10),
io.LimitReader(fi.Reader, chunkSize),
master, fi.Replication, fi.Collection, fi.Ttl,
jwt)
if e != nil {
// delete all uploaded chunks
cm.DeleteChunks(master)
return 0, e
}
-fids = append(fids, id)
cm.Chunks = append(cm.Chunks,
&ChunkInfo{
Offset: i * chunkSize,
Size: int64(count),
Fid: id,
},
)
retSize += count
}
-err = upload_file_id_list(fileUrl, fi.FileName+"-list", fids, jwt)
+err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
if err != nil {
// delete all uploaded chunks
cm.DeleteChunks(master)
}
} else {
-ret, e := Upload(fileUrl, fi.FileName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
+ret, e := Upload(fileUrl, baseName, fi.Reader, fi.IsGzipped, fi.MimeType, jwt)
if e != nil {
return 0, e
}
@@ -158,10 +179,17 @@ func upload_one_chunk(filename string, reader io.Reader, master,
return fid, uploadResult.Size, nil
}
-func upload_file_id_list(fileUrl, filename string, fids []string, jwt security.EncodedJwt) error {
-	var buf bytes.Buffer
-	buf.WriteString(strings.Join(fids, "\n"))
-	glog.V(4).Info("Uploading final list ", filename, " to ", fileUrl, "...")
-	_, e := Upload(fileUrl, filename, &buf, false, "text/plain", jwt)
+func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt security.EncodedJwt) error {
+	buf, e := manifest.GetData()
+	if e != nil {
+		return e
+	}
+	bufReader := bytes.NewReader(buf)
+	glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
+	u, _ := url.Parse(fileUrl)
+	q := u.Query()
+	q.Set("cm", "1")
+	u.RawQuery = q.Encode()
+	_, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", jwt)
return e
}
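For reference, the manifest that upload_chunked_file_manifest posts (with cm=1 appended to the query string) is plain JSON. A sketch of what GetData produces for a 5 MB file chunked at maxMB=2; the fids are invented:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/go/operation"
)

func main() {
	cm := operation.ChunkManifest{
		Name: "big.dat",
		Size: 5 * 1024 * 1024,
		Mime: "application/octet-stream",
		Chunks: operation.ChunkList{
			{Fid: "1,01010101", Offset: 0, Size: 2 * 1024 * 1024},
			{Fid: "2,02020202", Offset: 2 * 1024 * 1024, Size: 2 * 1024 * 1024},
			{Fid: "3,03030303", Offset: 4 * 1024 * 1024, Size: 1 * 1024 * 1024},
		},
	}
	data, err := cm.GetData()
	if err != nil {
		panic(err)
	}
	// Prints roughly: {"name":"big.dat","mime":"application/octet-stream",
	// "size":5242880,"chunks":[{"fid":"1,01010101","offset":0,"size":2097152},...]}
	fmt.Println(string(data))
}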

go/storage/needle.go (16)

@@ -15,6 +15,7 @@ import (
"github.com/chrislusf/seaweedfs/go/glog"
"github.com/chrislusf/seaweedfs/go/images"
"github.com/chrislusf/seaweedfs/go/util"
"github.com/chrislusf/seaweedfs/go/operation"
)
const (
@@ -52,7 +53,7 @@ func (n *Needle) String() (str string) {
return
}
-func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, ttl *TTL, e error) {
+func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string, isGzipped bool, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) {
form, fe := r.MultipartReader()
if fe != nil {
glog.V(0).Infoln("MultipartReader [ERROR]", fe)
@@ -117,8 +118,8 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
}
if part.Header.Get("Content-Encoding") == "gzip" {
isGzipped = true
-} else if IsGzippable(ext, mtype) {
-	if data, e = GzipData(data); e != nil {
+} else if operation.IsGzippable(ext, mtype) {
+	if data, e = operation.GzipData(data); e != nil {
return
}
isGzipped = true
@@ -132,12 +133,13 @@ func ParseUpload(r *http.Request) (fileName string, data []byte, mimeType string
}
modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64)
ttl, _ = ReadTTL(r.FormValue("ttl"))
isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm"))
return
}
func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
-fname, mimeType, isGzipped := "", "", false
+fname, mimeType, isGzipped, isChunkedFile := "", "", false, false
n = new(Needle)
-fname, n.Data, mimeType, isGzipped, n.LastModified, n.Ttl, e = ParseUpload(r)
+fname, n.Data, mimeType, isGzipped, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r)
if e != nil {
return
}
@@ -160,6 +162,10 @@ func NewNeedle(r *http.Request, fixJpgOrientation bool) (n *Needle, e error) {
n.SetHasTtl()
}
if isChunkedFile {
n.SetChunkManifest()
}
if fixJpgOrientation {
loweredName := strings.ToLower(fname)
if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") {
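The cm form value is the client-side signal: ParseUpload now returns isChunkedFile, and NewNeedle turns it into the chunk-manifest flag on the stored needle. A hedged sketch of an upload that sets it; the volume-server URL and fid are placeholders:

package main

import (
	"bytes"
	"mime/multipart"
	"net/http"
)

func main() {
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	part, _ := mw.CreateFormFile("file", "big.dat")
	part.Write([]byte(`{"name":"big.dat","size":5242880}`)) // manifest JSON
	mw.Close()

	// ?cm=1 makes ParseUpload return isChunkedFile=true, so the stored
	// needle gets FlagChunkManifest set.
	req, _ := http.NewRequest("PUT",
		"http://localhost:8080/3,01637037d6?cm=1", &body)
	req.Header.Set("Content-Type", mw.FormDataContentType())
	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}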

go/storage/needle_read_write.go (9)

@@ -16,6 +16,7 @@ const (
FlagHasMime = 0x04
FlagHasLastModifiedDate = 0x08
FlagHasTtl = 0x10
FlagChunkManifest = 0x80
LastModifiedBytesLength = 5
TtlBytesLength = 2
)
@@ -280,3 +281,11 @@ func (n *Needle) HasTtl() bool {
func (n *Needle) SetHasTtl() {
n.Flags = n.Flags | FlagHasTtl
}
func (n *Needle) IsChunkedManifest() bool {
return n.Flags&FlagChunkManifest > 0
}
func (n *Needle) SetChunkManifest() {
n.Flags = n.Flags | FlagChunkManifest
}
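FlagChunkManifest is just the next free bit in the needle's one-byte Flags field. A tiny illustration of the accessors' bit arithmetic, using the constants shown above:

package main

import "fmt"

const (
	FlagHasMime             = 0x04
	FlagHasLastModifiedDate = 0x08
	FlagHasTtl              = 0x10
	FlagChunkManifest       = 0x80
)

func main() {
	var flags byte
	flags |= FlagChunkManifest               // what SetChunkManifest does
	fmt.Println(flags&FlagChunkManifest > 0) // true: IsChunkedManifest
	fmt.Println(flags&FlagHasTtl > 0)        // false: no TTL bit set
}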

go/topology/volume_location_list.go (1)

@@ -17,6 +17,7 @@ func (dnll *VolumeLocationList) String() string {
}
func (dnll *VolumeLocationList) Head() *DataNode {
//mark first node as master volume
return dnll.list[0]
}

go/util/http_util.go (16)

@@ -9,7 +9,10 @@ import (
"net/url"
"strings"
"encoding/json"
"github.com/chrislusf/seaweedfs/go/security"
"github.com/syndtr/goleveldb/leveldb/errors"
)
var (
@@ -79,10 +82,21 @@ func Delete(url string, jwt security.EncodedJwt) error {
return e
}
defer resp.Body.Close()
-if _, err := ioutil.ReadAll(resp.Body); err != nil {
+body, err := ioutil.ReadAll(resp.Body)
+if err != nil {
return err
}
switch resp.StatusCode {
case http.StatusNotFound, http.StatusAccepted, http.StatusOK:
return nil
}
m := make(map[string]interface{})
if e := json.Unmarshal(body, &m); e == nil { // Unmarshal needs a pointer target
if s, ok := m["error"].(string); ok {
return errors.New(s)
}
}
return errors.New(string(body))
}
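With this change util.Delete treats 200, 202, and 404 as success and otherwise surfaces the server's JSON "error" field (or the raw body) as the returned error. A brief caller's-eye sketch, with a placeholder URL and an empty JWT:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/go/util"
)

func main() {
	// A 404 from the volume server now counts as already deleted (nil);
	// e.g. an {"error":"unauthorized"} reply comes back as that message.
	if err := util.Delete("http://localhost:8080/3,01637037d6", ""); err != nil {
		fmt.Println("delete failed:", err)
	}
}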
func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error {

go/weed/weed_server/common.go (2)

@@ -86,7 +86,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
}
debug("parsing upload file...")
-fname, data, mimeType, isGzipped, lastModified, _, pe := storage.ParseUpload(r)
+fname, data, mimeType, isGzipped, lastModified, _, _, pe := storage.ParseUpload(r)
if pe != nil {
writeJsonError(w, r, http.StatusBadRequest, pe)
return

go/weed/weed_server/volume_server_handlers_read.go (155)

@@ -81,6 +81,11 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
return
}
w.Header().Set("Etag", etag)
if vs.tryHandleChunkedFile(n, filename, w, r) {
return
}
if n.NameSize > 0 && filename == "" {
filename = string(n.Name)
dotIndex := strings.LastIndex(filename, ".")
@@ -109,7 +114,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
} else {
-if n.Data, err = storage.UnGzipData(n.Data); err != nil {
+if n.Data, err = operation.UnGzipData(n.Data); err != nil {
glog.V(0).Infoln("lookup error:", err, r.URL.Path)
}
}
@@ -135,7 +140,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.Itoa(len(n.Data)))
if _, e = w.Write(n.Data); e != nil {
-glog.V(0).Infoln("response write error:", e)
+glog.V(4).Infoln("response write error:", e)
}
return
}
@@ -176,11 +181,11 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
w.Header().Set("Content-Range", ra.contentRange(size))
w.WriteHeader(http.StatusPartialContent)
if _, e = w.Write(n.Data[ra.start : ra.start+ra.length]); e != nil {
-glog.V(0).Infoln("response write error:", e)
+glog.V(2).Infoln("response write error:", e)
}
return
}
-// process mulitple ranges
+// process multiple ranges
for _, ra := range ranges {
if ra.start > size {
http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
@@ -215,3 +220,145 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
io.CopyN(w, sendContent, sendSize)
}
func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) {
if !n.IsChunkedManifest() {
return false
}
raw, _ := strconv.ParseBool(r.FormValue("raw"))
if raw {
return false
}
processed = true
chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
if e != nil {
glog.V(0).Infoln("load chunked manifest error:", e)
return false
}
ext := ""
if fileName == "" && chunkManifest.Name != "" {
fileName = chunkManifest.Name
dotIndex := strings.LastIndex(fileName, ".")
if dotIndex > 0 {
ext = fileName[dotIndex:]
}
}
mtype := ""
if ext != "" {
mtype = mime.TypeByExtension(ext)
}
if chunkManifest.Mime != "" {
mt := chunkManifest.Mime
if !strings.HasPrefix(mt, "application/octet-stream") {
mtype = mt
}
}
if mtype != "" {
w.Header().Set("Content-Type", mtype)
}
if fileName != "" {
w.Header().Set("Content-Disposition", `filename="`+fileNameEscaper.Replace(fileName)+`"`)
}
w.Header().Set("X-File-Store", "chunked")
w.Header().Set("Accept-Ranges", "bytes")
if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(chunkManifest.Size, 10))
return true
}
chunkedFileReader := &operation.ChunkedFileReader{
Manifest: chunkManifest,
Master: vs.GetMasterNode(),
}
defer chunkedFileReader.Close()
rangeReq := r.Header.Get("Range")
if rangeReq == "" {
w.Header().Set("Content-Length", strconv.FormatInt(chunkManifest.Size, 10))
if _, e = io.Copy(w, chunkedFileReader); e != nil {
glog.V(2).Infoln("response write error:", e)
}
return true
}
//the rest is dealing with partial content request
//mostly copy from src/pkg/net/http/fs.go
size := chunkManifest.Size
ranges, err := parseRange(rangeReq, size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
if sumRangesSize(ranges) > size {
// The total number of bytes in all the ranges
// is larger than the size of the file by
// itself, so this is probably an attack, or a
// dumb client. Ignore the range request.
ranges = nil
return
}
if len(ranges) == 0 {
return
}
if len(ranges) == 1 {
// RFC 2616, Section 14.16:
// "When an HTTP message includes the content of a single
// range (for example, a response to a request for a
// single range, or to a request for a set of ranges
// that overlap without any holes), this content is
// transmitted with a Content-Range header, and a
// Content-Length header showing the number of bytes
// actually transferred.
// ...
// A response to a request for a single range MUST NOT
// be sent using the multipart/byteranges media type."
ra := ranges[0]
w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
w.Header().Set("Content-Range", ra.contentRange(size))
w.WriteHeader(http.StatusPartialContent)
if _, e = chunkedFileReader.Seek(ra.start, 0); e != nil {
glog.V(2).Infoln("chunkedFileReader Seek error:", e)
}
if _, e = io.CopyN(w, chunkedFileReader, ra.length); e != nil {
glog.V(2).Infoln("response write error:", e)
}
return
}
// process multiple ranges
for _, ra := range ranges {
if ra.start > size {
http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable)
return
}
}
sendSize := rangesMIMESize(ranges, mtype, size)
pr, pw := io.Pipe()
mw := multipart.NewWriter(pw)
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
sendContent := pr
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
go func() {
for _, ra := range ranges {
part, err := mw.CreatePart(ra.mimeHeader(mtype, size))
if err != nil {
pw.CloseWithError(err)
return
}
if _, e = chunkedFileReader.Seek(ra.start, 0); e != nil {
glog.V(2).Infoln("response write error:", e)
}
if _, err = io.CopyN(part, chunkedFileReader, ra.length); err != nil {
pw.CloseWithError(err)
return
}
}
mw.Close()
pw.Close()
}()
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(http.StatusPartialContent)
io.CopyN(w, sendContent, sendSize)
return
}
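From a client's perspective the handler makes a chunked file look like a single file: HEAD answers with the manifest's total size, GET streams the chunks through ChunkedFileReader, and Range requests seek before copying. A sketch of a ranged read; host and fid are placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://localhost:8080/3,01637037d6", nil)
	req.Header.Set("Range", "bytes=0-99") // first 100 bytes only
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body)) // expect: 206 100
	// Add ?raw=1 to fetch the manifest needle itself instead.
}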

go/weed/weed_server/volume_server_handlers_write.go (64)

@@ -53,9 +53,8 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
glog.V(2).Infoln("deleting", n)
cookie := n.Cookie
-count, ok := vs.store.ReadVolumeNeedle(volumeId, n)
-if ok != nil {
+if _, ok := vs.store.ReadVolumeNeedle(volumeId, n); ok != nil {
m := make(map[string]uint32)
m["size"] = 0
writeJsonQuiet(w, r, http.StatusNotFound, m)
@@ -64,14 +63,31 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
if n.Cookie != cookie {
glog.V(0).Infoln("delete", r.URL.Path, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
writeJsonError(w, r, http.StatusBadRequest, errors.New("File Random Cookie does not match."))
return
}
count := int64(n.Size)
if n.IsChunkedManifest() {
chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped())
if e != nil {
writeJsonError(w, r, http.StatusInternalServerError, errors.New("Load chunks manifest error: "+e.Error()))
return
}
// make sure all chunks are deleted before deleting the manifest
if e := chunkManifest.DeleteChunks(vs.GetMasterNode()); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, errors.New("Delete chunks error: "+e.Error()))
return
}
count = chunkManifest.Size
}
ret := topology.ReplicatedDelete(vs.GetMasterNode(), vs.store, volumeId, n, r)
if ret != 0 {
-m := make(map[string]uint32)
-m["size"] = uint32(count)
+m := make(map[string]int64)
+m["size"] = count
writeJsonQuiet(w, r, http.StatusAccepted, m)
} else {
writeJsonError(w, r, http.StatusInternalServerError, errors.New("Deletion Failed."))
@@ -86,7 +102,10 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
for _, fid := range r.Form["fid"] {
vid, id_cookie, err := operation.ParseFileId(fid)
if err != nil {
-ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+ret = append(ret, operation.DeleteResult{
+	Fid:    fid,
+	Status: http.StatusBadRequest,
+	Error:  err.Error()})
continue
}
n := new(storage.Needle)
@@ -95,18 +114,45 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
glog.V(4).Infoln("batch deleting", n)
cookie := n.Cookie
if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil {
-ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+ret = append(ret, operation.DeleteResult{
+	Fid:    fid,
+	Status: http.StatusNotFound,
+	Error:  err.Error(),
+})
continue
}
if n.IsChunkedManifest() {
// Don't allow deleting a chunk manifest in batch-delete mode
ret = append(ret, operation.DeleteResult{
	Fid:    fid,
	Status: http.StatusNotAcceptable,
	Error:  "ChunkManifest: not allowed in batch mode.",
})
continue
}
if n.Cookie != cookie {
-ret = append(ret, operation.DeleteResult{Fid: fid, Error: "File Random Cookie does not match."})
+ret = append(ret, operation.DeleteResult{
+	Fid:    fid,
+	Status: http.StatusBadRequest,
+	Error:  "File Random Cookie does not match.",
+})
glog.V(0).Infoln("deleting", fid, "with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
return
}
if size, err := vs.store.Delete(volumeId, n); err != nil {
-ret = append(ret, operation.DeleteResult{Fid: fid, Error: err.Error()})
+ret = append(ret, operation.DeleteResult{
+	Fid:    fid,
+	Status: http.StatusInternalServerError,
+	Error:  err.Error()},
+)
} else {
-ret = append(ret, operation.DeleteResult{Fid: fid, Size: int(size)})
+ret = append(ret, operation.DeleteResult{
+	Fid:    fid,
+	Status: http.StatusAccepted,
+	Size:   int(size)},
+)
}
}
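Deleting a chunked file is only supported through the single-file DELETE path, which removes every chunk via the master before the manifest itself; batch delete rejects manifests with 406. A sketch of both calls, assuming the batch endpoint lives at /delete and using a placeholder host and fid:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Single delete: chunks first, then the manifest needle; replies
	// 202 Accepted with {"size": <manifest size>} on success.
	req, _ := http.NewRequest("DELETE", "http://localhost:8080/3,01637037d6", nil)
	if resp, err := http.DefaultClient.Do(req); err == nil {
		fmt.Println(resp.Status)
		resp.Body.Close()
	}

	// Batch delete: a manifest fid comes back with Status 406 in its
	// DeleteResult instead of being deleted.
	form := url.Values{"fid": {"3,01637037d6"}}
	if resp, err := http.Post("http://localhost:8080/delete",
		"application/x-www-form-urlencoded",
		strings.NewReader(form.Encode())); err == nil {
		fmt.Println(resp.Status)
		resp.Body.Close()
	}
}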
