Browse Source

Merge branch 'master' of https://github.com/seaweedfs/seaweedfs

pull/3719/head
chrislu 2 years ago
parent
commit
3fc261d27c
  1. 1
      weed/command/server.go
  2. 3
      weed/command/volume.go
  3. 2
      weed/server/filer_server_handlers_write.go
  4. 3
      weed/server/volume_server.go
  5. 1
      weed/server/volume_server_handlers_read.go
  6. 2
      weed/shell/command_fs_configure.go
  7. 2
      weed/shell/command_lock_unlock.go
  8. 2
      weed/shell/command_remote_meta_sync.go
  9. 2
      weed/shell/command_s3_circuitbreaker_test.go
  10. 2
      weed/shell/command_s3_clean_uploads.go
  11. 2
      weed/storage/backend/memory_map/memory_map_windows.go
  12. 2
      weed/storage/needle/file_id_test.go
  13. 2
      weed/storage/needle/needle_parse_upload.go
  14. 2
      weed/storage/needle/needle_write.go
  15. 6
      weed/storage/store.go
  16. 2
      weed/storage/store_ec.go
  17. 2
      weed/storage/store_vacuum.go
  18. 2
      weed/storage/volume_read.go
  19. 2
      weed/storage/volume_vacuum.go
  20. 2
      weed/storage/volume_write.go

1
weed/command/server.go

@ -132,6 +132,7 @@ func init() {
serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
serverOptions.v.readBufferSize = cmdServer.Flag.Int("volume.readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage. Use with hasSlowRead normally.")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")

3
weed/command/volume.go

@ -67,6 +67,7 @@ type VolumeServerOptions struct {
// pulseSeconds *int
inflightUploadDataTimeout *time.Duration
hasSlowRead *bool
readBufferSize *int
}
func init() {
@ -98,6 +99,7 @@ func init() {
v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
v.readBufferSize = cmdVolume.Flag.Int("readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage. Use with hasSlowRead normally.")
}
var cmdVolume = &Command{
@ -246,6 +248,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
int64(*v.concurrentDownloadLimitMB)*1024*1024,
*v.inflightUploadDataTimeout,
*v.hasSlowRead,
*v.readBufferSize,
)
// starting grpc server
grpcS := v.startGrpcService(volumeServer)

2
weed/server/filer_server_handlers_write.go

@ -195,6 +195,8 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
httpStatus := http.StatusInternalServerError
if err == filer_pb.ErrNotFound {
httpStatus = http.StatusNoContent
writeJsonQuiet(w, r, httpStatus, nil)
return
}
writeJsonError(w, r, httpStatus, err)
return

3
weed/server/volume_server.go

@ -29,6 +29,7 @@ type VolumeServer struct {
inFlightDownloadDataLimitCond *sync.Cond
inflightUploadDataTimeout time.Duration
hasSlowRead bool
readBufferSize int
SeedMasterNodes []pb.ServerAddress
currentMaster pb.ServerAddress
@ -66,6 +67,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
concurrentDownloadLimit int64,
inflightUploadDataTimeout time.Duration,
hasSlowRead bool,
readBufferSize int,
) *VolumeServer {
v := util.GetViper()
@ -96,6 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
concurrentDownloadLimit: concurrentDownloadLimit,
inflightUploadDataTimeout: inflightUploadDataTimeout,
hasSlowRead: hasSlowRead,
readBufferSize: readBufferSize,
}
vs.SeedMasterNodes = masterNodes

1
weed/server/volume_server_handlers_read.go

@ -118,6 +118,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
readOption := &storage.ReadOption{
ReadDeleted: r.FormValue("readDeleted") == "true",
HasSlowRead: vs.hasSlowRead,
ReadBufferSize: vs.readBufferSize,
}
var count int

2
weed/shell/command_fs_configure.go

@ -98,7 +98,7 @@ func (c *commandFsConfigure) Do(args []string, commandEnv *CommandEnv, writer io
return fmt.Errorf("parse replication %s: %v", *replication, err)
}
if *volumeGrowthCount%rp.GetCopyCount() != 0 {
return fmt.Errorf("volumeGrowthCount %d should be devided by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
return fmt.Errorf("volumeGrowthCount %d should be divided by replication copy count %d", *volumeGrowthCount, rp.GetCopyCount())
}
}

2
weed/shell/command_lock_unlock.go

@ -21,7 +21,7 @@ func (c *commandLock) Name() string {
func (c *commandLock) Help() string {
return `lock in order to exclusively manage the cluster
This is a blocking operation if there is alread another lock.
This is a blocking operation if there is already another lock.
`
}

2
weed/shell/command_remote_meta_sync.go

@ -96,7 +96,7 @@ After caching the file content, the entry.RemoteEntry will be
remoteEntry.LastLocalSyncTsNs == time.Now.UnixNano()
Attributes.FileSize = uint64(remoteEntry.RemoteSize)
Attributes.Mtime = remoteEntry.RemoteMtime
chunks = non-emtpy
chunks = non-empty
When "weed filer.remote.sync" to upload local changes to remote, the criteria is:

2
weed/shell/command_s3_circuitbreaker_test.go

@ -285,7 +285,7 @@ func TestCircuitBreakerShell(t *testing.T) {
t.Error(err)
}
if !reflect.DeepEqual(actual, expect) {
t.Fatal("result of s3 circuit breaker shell command is unexpect!")
t.Fatal("result of s3 circuit breaker shell command is unexpected!")
}
}
}

2
weed/shell/command_s3_clean_uploads.go

@ -60,7 +60,7 @@ func (c *commandS3CleanUploads) Do(args []string, commandEnv *CommandEnv, writer
for _, bucket := range buckets {
if err := c.cleanupUploads(commandEnv, writer, filerBucketsPath, bucket, *uploadedTimeAgo, signingKey); err != nil {
fmt.Fprintf(writer, fmt.Sprintf("failed cleanup uploads for backet %s: %v", bucket, err))
fmt.Fprintf(writer, fmt.Sprintf("failed cleanup uploads for bucket %s: %v", bucket, err))
}
}

2
weed/storage/backend/memory_map/memory_map_windows.go

@ -153,7 +153,7 @@ func allocate(hMapFile windows.Handle, offset uint64, length uint64, write bool)
mBuffer := MemoryBuffer{}
//align memory allocations to the minium virtal memory allocation size
//align memory allocations to the minimum virtual memory allocation size
dwSysGran := systemInfo.dwAllocationGranularity
start := (offset / uint64(dwSysGran)) * uint64(dwSysGran)

2
weed/storage/needle/file_id_test.go

@ -15,7 +15,7 @@ func TestParseFileIdFromString(t *testing.T) {
fidStr1 = "100, 12345678"
_, err = ParseFileIdFromString(fidStr1)
if err == nil {
t.Errorf("%s : needlId invalid syntax", fidStr1)
t.Errorf("%s : needleId invalid syntax", fidStr1)
}
fidStr1 = "100,123456789"

2
weed/storage/needle/needle_parse_upload.go

@ -195,7 +195,7 @@ func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error
}
contentType := part.Header.Get("Content-Type")
if contentType != "" && contentType != "application/octet-stream" && mtype != contentType {
pu.MimeType = contentType // only return mime type if not deductable
pu.MimeType = contentType // only return mime type if not deducible
mtype = contentType
}

2
weed/storage/needle/needle_write.go

@ -128,7 +128,7 @@ func (n *Needle) Append(w backend.BackendStorageFile, version Version) (offset u
return
}
if offset >= MaxPossibleVolumeSize && n.Size.IsValid() {
err = fmt.Errorf("Volume Size %d Exeededs %d", offset, MaxPossibleVolumeSize)
err = fmt.Errorf("Volume Size %d Exceeds %d", offset, MaxPossibleVolumeSize)
return
}

6
weed/storage/store.go

@ -44,6 +44,10 @@ type ReadOption struct {
// * read requests should complete asap, not blocking other requests.
// * write requests may see high latency when downloading large files.
HasSlowRead bool
// Increasing ReadBufferSize can reduce the number of lock acquisitions and shorten read P99 latency,
// but will increase memory usage a bit. Use together with hasSlowRead normally.
ReadBufferSize int
}
/*
@ -58,7 +62,7 @@ type Store struct {
GrpcPort int
PublicUrl string
Locations []*DiskLocation
dataCenter string // optional informaton, overwriting master setting if exists
dataCenter string // optional information, overwriting master setting if exists
rack string // optional information, overwriting master setting if exists
connected bool
NeedleMapKind NeedleMapKind

2
weed/storage/store_ec.go

@ -339,7 +339,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolum
ecVolume.ShardLocationsLock.RLock()
for shardId, locations := range ecVolume.ShardLocations {
// skip currnent shard or empty shard
// skip current shard or empty shard
if shardId == shardIdToRecover {
continue
}

2
weed/storage/store_vacuum.go

@ -10,7 +10,7 @@ import (
func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
if v := s.findVolume(volumeId); v != nil {
glog.V(3).Infof("volumd %d garbage level: %f", volumeId, v.garbageLevel())
glog.V(3).Infof("volume %d garbage level: %f", volumeId, v.garbageLevel())
return v.garbageLevel(), nil
}
return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)

2
weed/storage/volume_read.go

@ -135,7 +135,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
actualOffset += int64(MaxPossibleVolumeSize)
}
buf := mem.Allocate(min(1024*1024, int(size)))
buf := mem.Allocate(min(readOption.ReadBufferSize, int(size)))
defer mem.Free(buf)
// read needle data

2
weed/storage/volume_vacuum.go

@ -167,7 +167,7 @@ func (v *Volume) CommitCompact() error {
if e = v.load(true, false, v.needleMapKind, 0); e != nil {
return e
}
glog.V(3).Infof("Finish commiting volume %d", v.Id)
glog.V(3).Infof("Finish committing volume %d", v.Id)
return nil
}

2
weed/storage/volume_write.go

@ -81,7 +81,7 @@ func removeVolumeFiles(filename string) {
// compaction
os.Remove(filename + ".cpd")
os.Remove(filename + ".cpx")
// level db indx file
// level db index file
os.RemoveAll(filename + ".ldb")
// marker for damaged or incomplete volume
os.Remove(filename + ".note")

Loading…
Cancel
Save