Browse Source

add option to enable caching

pull/439/head
chulin 9 years ago
parent
commit
2f805a229f
  1. 6
      weed/command/server.go
  2. 4
      weed/command/volume.go
  3. 4
      weed/server/volume_server.go
  4. 23
      weed/storage/needle_byte_cache.go

6
weed/command/server.go

@ -35,15 +35,11 @@ var cmdServer = &Command{
Short: "start a server, including volume server, and automatically elect a master server",
Long: `start both a volume server to provide storage spaces
and a master server to provide volume=>location mapping service and sequence number of file ids
This is provided as a convenient way to start both volume server and master server.
The servers are exactly the same as starting them separately.
So other volume servers can use this embedded master server also.
Optionally, one filer server can be started. Logically, filer servers should not be in a cluster.
They run with meta data on disk, not shared. So each filer server is different.
`,
}
@ -72,6 +68,7 @@ var (
volumeFixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", true, "Adjust jpg orientation when uploading.")
volumeReadRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
volumeServerPublicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
volumeEnableBytesCache = cmdServer.Flag.Bool("volume.cache.enable", false, "direct cache instead of OS cache, cost more memory.")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
serverWhiteList []string
@ -259,6 +256,7 @@ func runServer(cmd *Command, args []string) bool {
volumeNeedleMapKind,
*serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack,
serverWhiteList, *volumeFixJpgOrientation, *volumeReadRedirect,
*volumeEnableBytesCache,
)
glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*volumePort))

4
weed/command/volume.go

@ -36,6 +36,7 @@ type VolumeServerOptions struct {
indexType *string
fixJpgOrientation *bool
readRedirect *bool
enableBytesCache *bool
}
func init() {
@ -54,13 +55,13 @@ func init() {
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb] mode for memory~performance balance.")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", true, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.enableBytesCache = cmdVolume.Flag.Bool("cache.enable", false, "direct cache instead of OS cache, cost more memory.")
}
var cmdVolume = &Command{
UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333",
Short: "start a volume server",
Long: `start a volume server to provide storage spaces
`,
}
@ -132,6 +133,7 @@ func runVolume(cmd *Command, args []string) bool {
*v.master, *v.pulseSeconds, *v.dataCenter, *v.rack,
v.whiteList,
*v.fixJpgOrientation, *v.readRedirect,
*v.enableBytesCache,
)
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)

4
weed/server/volume_server.go

@ -33,7 +33,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
dataCenter string, rack string,
whiteList []string,
fixJpgOrientation bool,
readRedirect bool) *VolumeServer {
readRedirect bool,
enableBytesCache bool) *VolumeServer {
vs := &VolumeServer{
pulseSeconds: pulseSeconds,
dataCenter: dataCenter,
@ -44,6 +45,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
}
vs.SetMasterNode(masterNode)
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
storage.EnableBytesCache = enableBytesCache
vs.guard = security.NewGuard(whiteList, "")

23
weed/storage/needle_byte_cache.go

@ -11,15 +11,14 @@ import (
)
var (
bytesCache *lru.Cache
bytesPool *util.BytesPool
EnableBytesCache = true
bytesCache *lru.Cache
bytesPool *util.BytesPool
)
/*
There is one level of caching and one level of pooling.
In pooling, all []byte buffers are fetched from and returned to the pool bytesPool.
In caching, the string→[]byte mapping is cached.
*/
func init() {
@ -48,11 +47,13 @@ func (block *Block) increaseReference() {
func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
// check cache, return if found
cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
if obj, found := bytesCache.Get(cacheKey); found {
block = obj.(*Block)
block.increaseReference()
dataSlice = block.Bytes[0:readSize]
return dataSlice, block, nil
if EnableBytesCache {
if obj, found := bytesCache.Get(cacheKey); found {
block = obj.(*Block)
block.increaseReference()
dataSlice = block.Bytes[0:readSize]
return dataSlice, block, nil
}
}
// get the []byte from pool
@ -61,7 +62,9 @@ func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []b
block = &Block{Bytes: b, refCount: 2}
dataSlice = block.Bytes[0:readSize]
_, err = r.ReadAt(dataSlice, offset)
bytesCache.Add(cacheKey, block)
if EnableBytesCache {
bytesCache.Add(cacheKey, block)
}
return dataSlice, block, err
}

Loading…
Cancel
Save