volume server: rename readBufferSize to readBufferSizeMB

pull/3719/head
chrislu 2 years ago
commit 10d5b4b32b
1. weed/command/server.go (2 changed lines)
2. weed/command/volume.go (6 changed lines)
3. weed/server/volume_server.go (6 changed lines)
4. weed/server/volume_server_handlers_read.go (2 changed lines)
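
Taken together, this is a rename plus a unit change: the old flag took a value in bytes (default 1024 * 1024), while the renamed flag takes megabytes (default 4) and is multiplied back to bytes where the buffer is used, so the effective default grows from 1 MiB to 4 MiB. A minimal standalone sketch of that arithmetic, not SeaweedFS code; names other than readBufferSizeMB are illustrative:

package main

import "fmt"

func main() {
	// before: -readBufferSize took a byte count directly
	oldDefaultBytes := 1024 * 1024 // 1 MiB

	// after: -readBufferSizeMB takes megabytes, converted at the read path
	readBufferSizeMB := 4
	newDefaultBytes := readBufferSizeMB * 1024 * 1024 // 4 MiB

	fmt.Println(oldDefaultBytes, newDefaultBytes) // 1048576 4194304
}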

weed/command/server.go (2 changed lines)

@@ -132,7 +132,7 @@ func init() {
 	serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files")
 	serverOptions.v.inflightUploadDataTimeout = cmdServer.Flag.Duration("volume.inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 	serverOptions.v.hasSlowRead = cmdServer.Flag.Bool("volume.hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
-	serverOptions.v.readBufferSize = cmdServer.Flag.Int("volume.readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally")
+	serverOptions.v.readBufferSizeMB = cmdServer.Flag.Int("volume.readBufferSizeMB", 4, "<experimental> larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally")

 	s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
 	s3Options.portGrpc = cmdServer.Flag.Int("s3.port.grpc", 0, "s3 server grpc listen port")

weed/command/volume.go (6 changed lines)

@@ -67,7 +67,7 @@ type VolumeServerOptions struct {
 	// pulseSeconds *int
 	inflightUploadDataTimeout *time.Duration
 	hasSlowRead *bool
-	readBufferSize *int
+	readBufferSizeMB *int
 }

 func init() {
@@ -99,7 +99,7 @@ func init() {
 	v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")
 	v.inflightUploadDataTimeout = cmdVolume.Flag.Duration("inflightUploadDataTimeout", 60*time.Second, "inflight upload data wait timeout of volume servers")
 	v.hasSlowRead = cmdVolume.Flag.Bool("hasSlowRead", false, "<experimental> if true, this prevents slow reads from blocking other requests, but large file read P99 latency will increase.")
-	v.readBufferSize = cmdVolume.Flag.Int("readBufferSize", 1024 * 1024, "<experimental> larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally.")
+	v.readBufferSizeMB = cmdVolume.Flag.Int("readBufferSizeMB", 4, "<experimental> larger values can optimize query performance but will increase some memory usage,Use with hasSlowRead normally.")
 }

 var cmdVolume = &Command{
@@ -248,7 +248,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		int64(*v.concurrentDownloadLimitMB)*1024*1024,
 		*v.inflightUploadDataTimeout,
 		*v.hasSlowRead,
-		*v.readBufferSize,
+		*v.readBufferSizeMB,
 	)
 	// starting grpc server
 	grpcS := v.startGrpcService(volumeServer)
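
The command changes above follow Go's standard flag pattern: Flag.Int registers the option and returns a *int that is stored on the options struct and dereferenced when the server is constructed. A rough sketch of that pattern using only the standard library flag package; the Command/FlagSet wiring in weed differs in detail, and the struct below mirrors just the field touched by this commit:

package main

import (
	"flag"
	"fmt"
)

// volumeServerOptions is a stand-in holding only the renamed option.
type volumeServerOptions struct {
	readBufferSizeMB *int
}

func main() {
	fs := flag.NewFlagSet("volume", flag.ExitOnError)

	var v volumeServerOptions
	// register the flag; the default of 4 means 4 MB
	v.readBufferSizeMB = fs.Int("readBufferSizeMB", 4, "read buffer size in MB, used together with hasSlowRead")

	// e.g. a command line like "-readBufferSizeMB=8"
	fs.Parse([]string{"-readBufferSizeMB=8"})

	// the stored pointer is dereferenced when the value is passed on
	fmt.Println(*v.readBufferSizeMB) // 8
}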

weed/server/volume_server.go (6 changed lines)

@@ -29,7 +29,7 @@ type VolumeServer struct {
 	inFlightDownloadDataLimitCond *sync.Cond
 	inflightUploadDataTimeout time.Duration
 	hasSlowRead bool
-	readBufferSize int
+	readBufferSizeMB int

 	SeedMasterNodes []pb.ServerAddress
 	currentMaster pb.ServerAddress
@@ -67,7 +67,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	concurrentDownloadLimit int64,
 	inflightUploadDataTimeout time.Duration,
 	hasSlowRead bool,
-	readBufferSize int,
+	readBufferSizeMB int,
 ) *VolumeServer {
 	v := util.GetViper()
@@ -98,7 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 		concurrentDownloadLimit: concurrentDownloadLimit,
 		inflightUploadDataTimeout: inflightUploadDataTimeout,
 		hasSlowRead: hasSlowRead,
-		readBufferSize: readBufferSize,
+		readBufferSizeMB: readBufferSizeMB,
 	}
 	vs.SeedMasterNodes = masterNodes

weed/server/volume_server_handlers_read.go (2 changed lines)

@@ -118,7 +118,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	readOption := &storage.ReadOption{
 		ReadDeleted: r.FormValue("readDeleted") == "true",
 		HasSlowRead: vs.hasSlowRead,
-		ReadBufferSize: vs.readBufferSize,
+		ReadBufferSize: vs.readBufferSizeMB * 1024 * 1024,
 	}
 	var count int
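
The read handler is the one place the new unit is converted back to bytes: the per-request ReadOption keeps a byte-sized ReadBufferSize, computed from the MB-denominated server setting. A simplified, self-contained sketch of that construction; readOption here is a stand-in for storage.ReadOption, which in SeaweedFS may carry more fields than shown:

package main

import "fmt"

// readOption stands in for storage.ReadOption; only the fields touched by
// this hunk are shown.
type readOption struct {
	ReadDeleted    bool
	HasSlowRead    bool
	ReadBufferSize int // in bytes
}

// newReadOption converts the MB-denominated setting to bytes at the point
// of use, mirroring what the handler above does per request.
func newReadOption(readDeleted, hasSlowRead bool, readBufferSizeMB int) *readOption {
	return &readOption{
		ReadDeleted:    readDeleted,
		HasSlowRead:    hasSlowRead,
		ReadBufferSize: readBufferSizeMB * 1024 * 1024,
	}
}

func main() {
	opt := newReadOption(false, true, 4)
	fmt.Printf("%+v\n", opt) // ReadBufferSize is 4194304 bytes
}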
