From 3cc084269c47d72df463760445920b45819e9feb Mon Sep 17 00:00:00 2001
From: Lei Liu
Date: Sun, 29 Sep 2019 14:17:37 +0800
Subject: [PATCH 1/2] master api: return http 404 when volumeId not exist

Signed-off-by: Lei Liu
---
 .../load_test_leveldb/load_test_leveldb.go  |  13 +--
 .../remove_duplicate_fids.go                | 109 +++++++++---------
 unmaintained/see_dat/see_dat.go             |   4 +-
 weed/server/filer_server_handlers_write.go  |   2 +-
 weed/server/master_server_handlers.go       |   4 +-
 weed/shell/command_volume_list.go           |   4 +-
 6 files changed, 66 insertions(+), 70 deletions(-)

diff --git a/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
index 518a5081c..43dfb0e21 100644
--- a/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
+++ b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go
@@ -16,7 +16,7 @@ import (
 )
 
 var (
-	dir     = flag.String("dir", "./t", "directory to store level db files")
+	dir     = flag.String("dir", "./t", "directory to store level db files")
 	useHash = flag.Bool("isHash", false, "hash the path as the key")
 	dbCount = flag.Int("dbCount", 1, "the number of leveldb")
 )
@@ -36,7 +36,7 @@ func main() {
 
 	var dbs []*leveldb.DB
 	var chans []chan string
-	for d := 0 ; d < *dbCount; d++ {
+	for d := 0; d < *dbCount; d++ {
 		dbFolder := fmt.Sprintf("%s/%02d", *dir, d)
 		os.MkdirAll(dbFolder, 0755)
 		db, err := leveldb.OpenFile(dbFolder, opts)
@@ -49,9 +49,9 @@ func main() {
 	}
 
 	var wg sync.WaitGroup
-	for d := 0 ; d < *dbCount; d++ {
+	for d := 0; d < *dbCount; d++ {
 		wg.Add(1)
-		go func(d int){
+		go func(d int) {
 			defer wg.Done()
 
 			ch := chans[d]
@@ -60,14 +60,13 @@ func main() {
 			for p := range ch {
 				if *useHash {
 					insertAsHash(db, p)
-				}else{
+				} else {
 					insertAsFullPath(db, p)
 				}
 			}
 		}(d)
 	}
 
-
 	counter := int64(0)
 	lastResetTime := time.Now()
 
@@ -101,7 +100,7 @@ func main() {
 		}
 	}
 
-	for d := 0 ; d < *dbCount; d++ {
+	for d := 0; d < *dbCount; d++ {
 		close(chans[d])
 	}
 
diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
index 5716ffa90..4b37a64fb 100644
--- a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
+++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go
@@ -1,92 +1,91 @@
 package main
 
 import (
-	"flag"
-
-	"github.com/chrislusf/seaweedfs/weed/glog"
-	"github.com/chrislusf/seaweedfs/weed/storage"
-	"github.com/chrislusf/seaweedfs/weed/storage/needle"
-	"os"
-	"path/filepath"
-
-	"fmt"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
 var (
-	volumePath = flag.String("dir", "/tmp", "data directory to store files")
-	volumeCollection = flag.String("collection", "", "the volume collection name")
-	volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
+	volumePath = flag.String("dir", "/tmp", "data directory to store files")
+	volumeCollection = flag.String("collection", "", "the volume collection name")
+	volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
 )
 
-func Checksum(n* needle.Needle) string {
-	return fmt.Sprintf("%s%x", n.Id, n.Cookie)
+func Checksum(n *needle.Needle) string {
+	return fmt.Sprintf("%s%x", n.Id, n.Cookie)
 }
 
 type VolumeFileScanner4SeeDat struct {
-	version needle.Version
-	block storage.SuperBlock
+	version needle.Version
+	block storage.SuperBlock
 
-	dir string
-	hashes map[string]bool
-	dat * os.File
+	dir string
+	hashes map[string]bool
+	dat *os.File
 }
 
 func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error {
-	scanner.version = superBlock.Version()
-	scanner.block = superBlock
-	return nil
+	scanner.version = superBlock.Version()
+	scanner.block = superBlock
+	return nil
 }
 
 func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
-	return true
+	return true
 }
 
 func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64) error {
-	if scanner.dat == nil {
-		newDatFile, err := os.Create(filepath.Join(*volumePath, "dat_fixed"))
-		if err != nil {
-			glog.Fatalf("Write New Volume Data %v", err)
+	if scanner.dat == nil {
+		newDatFile, err := os.Create(filepath.Join(*volumePath, "dat_fixed"))
+		if err != nil {
+			glog.Fatalf("Write New Volume Data %v", err)
+		}
+		scanner.dat = newDatFile
+		scanner.dat.Write(scanner.block.Bytes())
 	}
-		scanner.dat = newDatFile
-		scanner.dat.Write(scanner.block.Bytes())
-	}
 
-	checksum := Checksum(n)
+	checksum := Checksum(n)
 
-	if scanner.hashes[checksum] {
-		glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
-		return nil
-	}
-	scanner.hashes[checksum] = true
+	if scanner.hashes[checksum] {
+		glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
+		return nil
+	}
+	scanner.hashes[checksum] = true
 
-	_, s, _, e := n.Append(scanner.dat, scanner.version)
-	fmt.Printf("size %d error %v\n", s, e)
+	_, s, _, e := n.Append(scanner.dat, scanner.version)
+	fmt.Printf("size %d error %v\n", s, e)
 
-	return nil
+	return nil
 }
 
 func main() {
-	flag.Parse()
+	flag.Parse()
 
-	vid := needle.VolumeId(*volumeId)
+	vid := needle.VolumeId(*volumeId)
 
-	outpath, _ := filepath.Abs(filepath.Dir(os.Args[0]))
+	outpath, _ := filepath.Abs(filepath.Dir(os.Args[0]))
 
-	scanner := &VolumeFileScanner4SeeDat{
-		dir: filepath.Join(outpath, "out"),
-		hashes: map[string]bool{},
-	}
+	scanner := &VolumeFileScanner4SeeDat{
+		dir: filepath.Join(outpath, "out"),
+		hashes: map[string]bool{},
+	}
 
-	if _, err := os.Stat(scanner.dir); err != nil {
-		if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
-			glog.Fatalf("could not create output dir : %s", err)
-		}
-	}
+	if _, err := os.Stat(scanner.dir); err != nil {
+		if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
+			glog.Fatalf("could not create output dir : %s", err)
+		}
+	}
 
-	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
-	if err != nil {
-		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
-	}
+	err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
+	if err != nil {
+		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
+	}
 }
diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go
index e8e54fd4f..e07704fc6 100644
--- a/unmaintained/see_dat/see_dat.go
+++ b/unmaintained/see_dat/see_dat.go
@@ -2,12 +2,11 @@ package main
 
 import (
 	"flag"
+	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
-
-	"time"
 )
 
 var (
@@ -45,5 +44,4 @@ func main() {
 	if err != nil {
 		glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
 	}
-
 }
diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go
index b419c51af..5d95a5d7e 100644
--- a/weed/server/filer_server_handlers_write.go
+++ b/weed/server/filer_server_handlers_write.go
@@ -275,7 +275,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
 	isRecursive := r.FormValue("recursive") == "true"
 	ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true"
 
-	err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError,true)
+	err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, true)
 	if err != nil {
 		glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error())
 		writeJsonError(w, r, http.StatusInternalServerError, err)
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index 93f983375..728c32076 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -48,7 +48,7 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request)
 	collection := r.FormValue("collection") //optional, but can be faster if too many collections
 	location := ms.findVolumeLocation(collection, vid)
 	httpStatus := http.StatusOK
-	if location.Error != "" {
+	if location.Error != "" || location.Locations == nil {
 		httpStatus = http.StatusNotFound
 	} else {
 		forRead := r.FormValue("read")
@@ -60,7 +60,7 @@ func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request)
 
 // findVolumeLocation finds the volume location from master topo if it is leader,
 // or from master client if not leader
-func (ms *MasterServer) findVolumeLocation(collection string, vid string) operation.LookupResult {
+func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.LookupResult {
 	var locations []operation.Location
 	var err error
 	if ms.Topo.IsLeader() {
diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go
index 134580ffe..91b5a0d32 100644
--- a/weed/shell/command_volume_list.go
+++ b/weed/shell/command_volume_list.go
@@ -98,7 +98,7 @@ func writeDataNodeInfo(writer io.Writer, t *master_pb.DataNodeInfo) statistics {
 }
 func writeVolumeInformationMessage(writer io.Writer, t *master_pb.VolumeInformationMessage) statistics {
 	fmt.Fprintf(writer, " volume %+v \n", t)
-	return newStatiscis(t)
+	return newStatistics(t)
 }
 
 type statistics struct {
@@ -108,7 +108,7 @@ type statistics struct {
 	DeletedBytes uint64
 }
 
-func newStatiscis(t *master_pb.VolumeInformationMessage) statistics {
+func newStatistics(t *master_pb.VolumeInformationMessage) statistics {
 	return statistics{
 		Size: t.Size,
 		FileCount: t.FileCount,

From 2cb348a2c2d31775578d40024ffcf5123724e869 Mon Sep 17 00:00:00 2001
From: Lei Liu
Date: Sun, 29 Sep 2019 21:05:37 +0800
Subject: [PATCH 2/2] s3 api: fix listbucket common_prefixes issue

Signed-off-by: Lei Liu
---
 weed/s3api/filer_multipart.go             | 5 +++++
 weed/s3api/s3api_objects_list_handlers.go | 8 +++++---
 weed/s3api/s3api_xsd_generated.go         | 2 +-
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go
index c8fe05645..3bf4aafac 100644
--- a/weed/s3api/filer_multipart.go
+++ b/weed/s3api/filer_multipart.go
@@ -91,6 +91,11 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C
 	}
 
 	dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
+	// remove suffix '/'
+	if strings.HasSuffix(dirName, "/") {
+		dirName = dirName[:len(dirName)-1]
+	}
+
 	err = s3a.mkFile(ctx, dirName, entryName, finalParts)
 	if err != nil {
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 4053913fb..1fc8b6b37 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -125,9 +125,11 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr
 			}
 			lastEntryName = entry.Name
 			if entry.IsDirectory {
-				commonPrefixes = append(commonPrefixes, PrefixEntry{
-					Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
-				})
+				if entry.Name != ".uploads" {
+					commonPrefixes = append(commonPrefixes, PrefixEntry{
+						Prefix: fmt.Sprintf("%s%s/", dir, entry.Name),
+					})
+				}
 			} else {
 				contents = append(contents, ListEntry{
 					Key: fmt.Sprintf("%s%s", dir, entry.Name),
diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go
index 573c09ede..9d62afc4e 100644
--- a/weed/s3api/s3api_xsd_generated.go
+++ b/weed/s3api/s3api_xsd_generated.go
@@ -675,7 +675,7 @@ type PostResponse struct {
 }
 
 type PrefixEntry struct {
-	Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"`
+	Prefix string `xml:"Prefix"`
 }
 
 type PutObject struct {
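
Testing note (not part of the patches): one way to exercise the behavior changed by PATCH 1/2 is to ask the master's /dir/lookup endpoint for a volume id that does not exist; with the patch applied the master should answer 404 instead of 200. The sketch below is illustrative only and makes assumptions beyond the patch: a master listening on localhost:9333 and an arbitrary volume id 999 that is expected to be absent.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumes a SeaweedFS master on localhost:9333; volume id 999 is an
	// arbitrary id expected not to exist (both are assumptions, not part
	// of the patch series above).
	resp, err := http.Get("http://localhost:9333/dir/lookup?volumeId=999")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)

	// Before PATCH 1/2: typically 200 with an error field in the JSON body.
	// After PATCH 1/2:  404 when the volume id cannot be found.
	fmt.Println("status:", resp.StatusCode)
	fmt.Println("body:  ", string(body))
}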