
volume: add "readBufSize" option to customize read optimization (#3702)

* simplify a bit

* feat: volume: add "readBufSize" option to customize read optimization

* refactor: readBufSize -> readBufferSize

* simplify a bit

* simplify a bit
famosss, 2 years ago (committed by GitHub)
parent commit d949a238b8
1. weed/command/server.go (+1)
2. weed/command/volume.go (+3)
3. weed/server/volume_server.go (+3)
4. weed/server/volume_server_handlers_read.go (+1)
5. weed/storage/store.go (+4)
6. weed/storage/volume_read.go (+1, -1)

weed/command/server.go (+1)

@@ -132,6 +132,7 @@ func init() {
 	serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
 	serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 	serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
+	serverOptions.v.readBufferSize = cmdServer.Flag.Int("volume.readBufferSize", 1024*1024, "<experimental> larger values can optimize query performance but will increase some memory usage, use with hasSlowRead normally")
 	s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
 	s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")

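For readers unfamiliar with how these options are wired up, here is a minimal standalone sketch (not taken from the commit) of registering a flag like volume.readBufferSize with Go's standard flag package; the non-positive-value guard is a hypothetical addition for illustration:

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Mirrors the new option: a 1 MiB default read buffer.
	readBufferSize := flag.Int("volume.readBufferSize", 1024*1024, "read buffer size in bytes")
	flag.Parse()

	// Hypothetical guard, not part of the commit: fall back to the default
	// when a non-positive value is supplied on the command line.
	if *readBufferSize <= 0 {
		*readBufferSize = 1024 * 1024
	}
	fmt.Println("using read buffer size:", *readBufferSize)
}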
weed/command/volume.go (+3)

@@ -67,6 +67,7 @@ type VolumeServerOptions struct {
 	// pulseSeconds *int
 	inflightUploadDataTimeout *time.Duration
 	hasSlowRead *bool
+	readBufferSize *int
 }
 func init() {
@@ -98,6 +99,7 @@ func init() {
 	v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
 	v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 	v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
+	v.readBufferSize = cmdVolume.Flag.Int("readBufferSize", 1024*1024, "<experimental> larger values can optimize query performance but will increase some memory usage, use with hasSlowRead normally.")
 }
 var cmdVolume = &Command{
@@ -246,6 +248,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		int64(*v.concurrentDownloadLimitMB)*1024*1024,
 		*v.inflightUploadDataTimeout,
 		*v.hasSlowRead,
+		*v.readBufferSize,
 	)
 	// starting grpc server
 	grpcS := v.startGrpcService(volumeServer)

weed/server/volume_server.go (+3)

@@ -29,6 +29,7 @@ type VolumeServer struct {
 	inFlightDownloadDataLimitCond *sync.Cond
 	inflightUploadDataTimeout time.Duration
 	hasSlowRead bool
+	readBufferSize int
 	SeedMasterNodes []pb.ServerAddress
 	currentMaster pb.ServerAddress
@@ -66,6 +67,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	concurrentDownloadLimit int64,
 	inflightUploadDataTimeout time.Duration,
 	hasSlowRead bool,
+	readBufferSize int,
 ) *VolumeServer {
 	v := util.GetViper()
@@ -96,6 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	concurrentDownloadLimit: concurrentDownloadLimit,
 	inflightUploadDataTimeout: inflightUploadDataTimeout,
 	hasSlowRead: hasSlowRead,
+	readBufferSize: readBufferSize,
 	}
 	vs.SeedMasterNodes = masterNodes

weed/server/volume_server_handlers_read.go (+1)

@@ -118,6 +118,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	readOption := &storage.ReadOption{
 		ReadDeleted: r.FormValue("readDeleted") == "true",
 		HasSlowRead: vs.hasSlowRead,
+		ReadBufferSize: vs.readBufferSize,
 	}
 	var count int

weed/storage/store.go (+4)

@@ -44,6 +44,10 @@ type ReadOption struct {
 	// * read requests should complete asap, not blocking other requests.
 	// * write requests may see high latency when downloading large files.
 	HasSlowRead bool
+	// increasing ReadBufferSize can reduce the number of lock acquisitions and shorten read P99 latency,
+	// but will increase memory usage a bit. Use with hasSlowRead normally.
+	ReadBufferSize int
 }
 /*

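To make the tradeoff described in the ReadOption comment concrete, here is a small illustrative sketch (not part of the commit); it assumes each buffered read roughly corresponds to one lock acquisition inside the volume, so a larger ReadBufferSize means fewer iterations to stream the same object:

package main

import "fmt"

// chunkCount returns how many buffered reads are needed to stream size bytes
// when each read fills at most bufSize bytes (ceiling division).
func chunkCount(size, bufSize int) int {
	if bufSize <= 0 {
		return 0
	}
	return (size + bufSize - 1) / bufSize
}

func main() {
	const objectSize = 64 * 1024 * 1024 // a 64 MiB read
	for _, buf := range []int{64 * 1024, 1024 * 1024, 4 * 1024 * 1024} {
		fmt.Printf("ReadBufferSize %8d bytes -> %d reads\n", buf, chunkCount(objectSize, buf))
	}
}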
weed/storage/volume_read.go (+1, -1)

@@ -136,7 +136,7 @@ func (v *Volume) readNeedleDataInto(n *needle.Needle, readOption *ReadOption, wr
 		actualOffset += int64(MaxPossibleVolumeSize)
 	}
-	buf := mem.Allocate(min(1024*1024, int(size)))
+	buf := mem.Allocate(min(readOption.ReadBufferSize, int(size)))
 	defer mem.Free(buf)
 	// read needle data

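The rest of readNeedleDataInto is not shown in this diff. As a rough, self-contained illustration only (using a plain make instead of SeaweedFS's pooled mem.Allocate / mem.Free), a scratch buffer capped at min(readOption.ReadBufferSize, int(size)) would be used to stream the needle data along these lines:

package main

import (
	"bytes"
	"io"
	"log"
	"strings"
)

// streamWithBuffer copies size bytes from src to dst using a scratch buffer
// capped at bufSize, mirroring the min(ReadBufferSize, size) cap above.
// Illustrative sketch only, not the SeaweedFS implementation.
func streamWithBuffer(dst io.Writer, src io.Reader, size int64, bufSize int) error {
	if size <= 0 {
		return nil
	}
	if bufSize <= 0 {
		bufSize = 64 * 1024 // hypothetical fallback, for illustration only
	}
	if int64(bufSize) > size {
		bufSize = int(size) // never allocate more than the data actually holds
	}
	buf := make([]byte, bufSize)
	_, err := io.CopyBuffer(dst, io.LimitReader(src, size), buf)
	return err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 10*1024))
	var dst bytes.Buffer
	if err := streamWithBuffer(&dst, src, int64(src.Len()), 1024*1024); err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", dst.Len())
}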