filer, volume: add concurrent upload size limit to avoid OOM

add some back pressure when writes are slow
Chris Lu · 4 years ago · commit ac875976c0 · branch pull/1960/head
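
Both servers apply the same back-pressure mechanism: each PUT/POST reserves its Content-Length against an atomic in-flight byte counter, and new uploads wait on a sync.Cond while the counter exceeds the configured limit. Below is a minimal standalone sketch of the pattern; uploadGate and its method names are illustrative, not identifiers from this commit.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// uploadGate approximates the commit's back-pressure scheme: writers
// wait while in-flight bytes exceed the limit, then reserve their size.
type uploadGate struct {
	inFlight int64      // bytes currently being uploaded
	limit    int64      // soft cap on aggregate in-flight bytes
	cond     *sync.Cond // wakes waiters when bytes are released
}

func newUploadGate(limit int64) *uploadGate {
	return &uploadGate{limit: limit, cond: sync.NewCond(new(sync.Mutex))}
}

func (g *uploadGate) acquire(n int64) {
	g.cond.L.Lock()
	for atomic.LoadInt64(&g.inFlight) > g.limit {
		g.cond.Wait()
	}
	atomic.AddInt64(&g.inFlight, n)
	g.cond.L.Unlock()
}

func (g *uploadGate) release(n int64) {
	atomic.AddInt64(&g.inFlight, -n)
	g.cond.Signal()
}

func main() {
	gate := newUploadGate(100) // pretend the limit is 100 bytes
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			gate.acquire(60) // each fake upload is 60 bytes
			defer gate.release(60)
			fmt.Printf("upload %d admitted\n", id)
			time.Sleep(50 * time.Millisecond) // simulate a slow write
		}(i)
	}
	wg.Wait()
}

Because the check-and-reserve happens under the Cond's mutex while releases only use atomics, the limit is a soft cap: an admitted upload may push the counter past it, but no new upload starts while the counter stays above the limit.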
Changed files (lines changed):

1. weed/command/filer.go (3)
2. weed/command/server.go (2)
3. weed/command/volume.go (3)
4. weed/server/filer_server.go (5)
5. weed/server/filer_server_handlers.go (21)
6. weed/server/volume_server.go (8)
7. weed/server/volume_server_handlers.go (30)

weed/command/filer.go

@@ -47,6 +47,7 @@ type FilerOptions struct {
 	metricsHttpPort         *int
 	saveToFilerLimit        *int
 	defaultLevelDbDirectory *string
+	concurrentUploadLimitMB *int
 }

@@ -69,6 +70,7 @@ func init() {
 	f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 	f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store")
 	f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory")
+	f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")

 	// start s3 on filer
 	filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway")

@@ -174,6 +176,7 @@ func (fo *FilerOptions) startFiler() {
 		Cipher:                *fo.cipher,
 		SaveToFilerLimit:      *fo.saveToFilerLimit,
 		Filers:                peers,
+		ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024,
 	})
 	if nfs_err != nil {
 		glog.Fatalf("Filer startup error: %v", nfs_err)
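
With the flag wired through to FilerOption, a standalone filer started as, for example, weed filer -concurrentUploadLimitMB=256 admits up to 256 MB of in-flight upload data before further writes block; the default is 128 MB.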

weed/command/server.go

@@ -99,6 +99,7 @@ func init() {
 	filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
 	filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
 	filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.")
+	filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size")

 	serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
 	serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")

@@ -108,6 +109,7 @@ func init() {
 	serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
 	serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
 	serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+	serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size")
 	serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
 	serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
 	serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
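
Note that the combined weed server mode defaults both filer.concurrentUploadLimitMB and volume.concurrentUploadLimitMB to 64 MB, half the 128 MB default of the standalone commands, presumably because both components share a single process.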

weed/command/volume.go

@@ -56,6 +56,7 @@ type VolumeServerOptions struct {
 	memProfile              *string
 	compactionMBPerSecond   *int
 	fileSizeLimitMB         *int
+	concurrentUploadLimitMB *int
 	minFreeSpacePercents    []float32
 	pprof                   *bool
 	preStopSeconds          *int

@@ -85,6 +86,7 @@ func init() {
 	v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
 	v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
 	v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory")
+	v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size")
 	v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
 	v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 	v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files")

@@ -237,6 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		*v.fixJpgOrientation, *v.readRedirect,
 		*v.compactionMBPerSecond,
 		*v.fileSizeLimitMB,
+		int64(*v.concurrentUploadLimitMB) * 1024 * 1024,
 	)

 	// starting grpc server
 	grpcS := v.startGrpcService(volumeServer)

weed/server/filer_server.go

@@ -61,6 +61,7 @@ type FilerOption struct {
 	Cipher                bool
 	SaveToFilerLimit      int
 	Filers                []string
+	ConcurrentUploadLimit int64
 }

@@ -79,6 +80,9 @@ type FilerServer struct {
 	brokers     map[string]map[string]bool
 	brokersLock sync.Mutex
+
+	inFlightDataSize      int64
+	inFlightDataLimitCond *sync.Cond
 }

@@ -87,6 +91,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
 		option:                option,
 		grpcDialOption:        security.LoadClientTLS(util.GetViper(), "grpc.filer"),
 		brokers:               make(map[string]map[string]bool),
+		inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)),
 	}

 	fs.listenersCond = sync.NewCond(&fs.listenersLock)
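
One note on the new fields: sync.Cond has to be constructed with sync.NewCond around an explicit Locker, as done here in NewFilerServer; the zero value carries no Locker, so calling Wait on it would panic. The counter itself is a plain int64 manipulated with sync/atomic.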

weed/server/filer_server_handlers.go

@@ -4,6 +4,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"net/http"
 	"strings"
+	"sync/atomic"
 	"time"

 	"github.com/chrislusf/seaweedfs/weed/stats"

@@ -47,7 +48,22 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
 			fs.DeleteHandler(w, r)
 		}
 		stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds())
-	case "PUT":
+	case "POST", "PUT":
+		// wait until in flight data is less than the limit
+		contentLength := getContentLength(r)
+		fs.inFlightDataLimitCond.L.Lock()
+		for atomic.LoadInt64(&fs.inFlightDataSize) > fs.option.ConcurrentUploadLimit {
+			fs.inFlightDataLimitCond.Wait()
+		}
+		atomic.AddInt64(&fs.inFlightDataSize, contentLength)
+		fs.inFlightDataLimitCond.L.Unlock()
+		defer func() {
+			atomic.AddInt64(&fs.inFlightDataSize, -contentLength)
+			fs.inFlightDataLimitCond.Signal()
+		}()
+
+		if r.Method == "PUT" {
 			stats.FilerRequestCounter.WithLabelValues("put").Inc()
 			if _, ok := r.URL.Query()["tagging"]; ok {
 				fs.PutTaggingHandler(w, r)

@@ -55,10 +71,11 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
 				fs.PostHandler(w, r)
 			}
 			stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds())
-	case "POST":
+		} else { // method == "POST"
 			stats.FilerRequestCounter.WithLabelValues("post").Inc()
 			fs.PostHandler(w, r)
 			stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds())
+		}
 	case "OPTIONS":
 		stats.FilerRequestCounter.WithLabelValues("options").Inc()
 		OptionsHandler(w, r, false)
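
Two properties of this gate are worth noting. The wait loop uses a strict greater-than comparison before reserving, so once in-flight bytes drain to the limit or below, the next upload is always admitted, even one larger than the entire limit; the cap therefore bounds aggregate in-flight bytes approximately rather than strictly. And each completing request calls Signal rather than Broadcast, waking waiters one at a time as capacity frees up.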

weed/server/volume_server.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"net/http"
+	"sync"

 	"google.golang.org/grpc"

@@ -34,6 +35,10 @@ type VolumeServer struct {
 	fileSizeLimitBytes int64
 	isHeartbeating     bool
 	stopChan           chan bool
+
+	inFlightDataSize      int64
+	inFlightDataLimitCond *sync.Cond
+	concurrentUploadLimit int64
 }

@@ -48,6 +53,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	readRedirect bool,
 	compactionMBPerSecond int,
 	fileSizeLimitMB int,
+	concurrentUploadLimit int64,
 ) *VolumeServer {
 	v := util.GetViper()

@@ -72,6 +78,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 		fileSizeLimitBytes:    int64(fileSizeLimitMB) * 1024 * 1024,
 		isHeartbeating:        true,
 		stopChan:              make(chan bool),
+		inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)),
+		concurrentUploadLimit: concurrentUploadLimit,
 	}

 	vs.SeedMasterNodes = masterNodes

weed/server/volume_server_handlers.go

@@ -2,7 +2,9 @@ package weed_server

 import (
 	"net/http"
+	"strconv"
 	"strings"
+	"sync/atomic"

 	"github.com/chrislusf/seaweedfs/weed/util"

@@ -40,8 +42,24 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Request) {
 		stats.DeleteRequest()
 		vs.guard.WhiteList(vs.DeleteHandler)(w, r)
 	case "PUT", "POST":
+		// wait until in flight data is less than the limit
+		contentLength := getContentLength(r)
+		vs.inFlightDataLimitCond.L.Lock()
+		for atomic.LoadInt64(&vs.inFlightDataSize) > vs.concurrentUploadLimit {
+			vs.inFlightDataLimitCond.Wait()
+		}
+		atomic.AddInt64(&vs.inFlightDataSize, contentLength)
+		vs.inFlightDataLimitCond.L.Unlock()
+		defer func() {
+			atomic.AddInt64(&vs.inFlightDataSize, -contentLength)
+			vs.inFlightDataLimitCond.Signal()
+		}()
+
+		// process uploads
 		stats.WriteRequest()
 		vs.guard.WhiteList(vs.PostHandler)(w, r)
 	case "OPTIONS":
 		stats.ReadRequest()
 		w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS")

@@ -49,6 +67,18 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Request) {
 	}
 }

+func getContentLength(r *http.Request) int64 {
+	contentLength := r.Header.Get("Content-Length")
+	if contentLength != "" {
+		length, err := strconv.ParseInt(contentLength, 10, 64)
+		if err != nil {
+			return 0
+		}
+		return length
+	}
+	return 0
+}
+
 func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
 	if r.Header.Get("Origin") != "" {
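
getContentLength falls back to 0 when the Content-Length header is missing or unparseable, so chunked uploads are never counted against the limit. For reference, net/http already parses this header into r.ContentLength, using -1 when the length is unknown; a hypothetical equivalent (not part of this commit) could be:

// contentLengthOrZero is a hypothetical alternative to getContentLength.
// net/http pre-parses Content-Length into r.ContentLength and sets it to
// -1 when the length is unknown (e.g. chunked transfer encoding); mapping
// negatives to 0 matches the commit's behavior of not counting such requests.
func contentLengthOrZero(r *http.Request) int64 {
	if r.ContentLength < 0 {
		return 0
	}
	return r.ContentLength
}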
