Browse Source

add option to enable caching

pull/439/head
chulin 9 years ago
parent
commit
2f805a229f
  1. 6
      weed/command/server.go
  2. 4
      weed/command/volume.go
  3. 4
      weed/server/volume_server.go
  4. 7
      weed/storage/needle_byte_cache.go

6
weed/command/server.go

@@ -35,15 +35,11 @@ var cmdServer = &Command{
Short: "start a server, including volume server, and automatically elect a master server", Short: "start a server, including volume server, and automatically elect a master server",
Long: `start both a volume server to provide storage spaces Long: `start both a volume server to provide storage spaces
and a master server to provide volume=>location mapping service and sequence number of file ids and a master server to provide volume=>location mapping service and sequence number of file ids
This is provided as a convenient way to start both volume server and master server. This is provided as a convenient way to start both volume server and master server.
The servers are exactly the same as starting them separately. The servers are exactly the same as starting them separately.
So other volume servers can use this embedded master server also. So other volume servers can use this embedded master server also.
Optionally, one filer server can be started. Logically, filer servers should not be in a cluster. Optionally, one filer server can be started. Logically, filer servers should not be in a cluster.
They run with meta data on disk, not shared. So each filer server is different. They run with meta data on disk, not shared. So each filer server is different.
`, `,
} }
@@ -72,6 +68,7 @@ var (
volumeFixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", true, "Adjust jpg orientation when uploading.") volumeFixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", true, "Adjust jpg orientation when uploading.")
volumeReadRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") volumeReadRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
volumeServerPublicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") volumeServerPublicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
volumeEnableBytesCache = cmdServer.Flag.Bool("volume.cache.enable", false, "direct cache instead of OS cache, cost more memory.")
isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer")
serverWhiteList []string serverWhiteList []string
@@ -259,6 +256,7 @@ func runServer(cmd *Command, args []string) bool {
volumeNeedleMapKind, volumeNeedleMapKind,
*serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack, *serverIp+":"+strconv.Itoa(*masterPort), *volumePulse, *serverDataCenter, *serverRack,
serverWhiteList, *volumeFixJpgOrientation, *volumeReadRedirect, serverWhiteList, *volumeFixJpgOrientation, *volumeReadRedirect,
*volumeEnableBytesCache,
) )
glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*volumePort)) glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "at", *serverIp+":"+strconv.Itoa(*volumePort))

4
weed/command/volume.go

@@ -36,6 +36,7 @@ type VolumeServerOptions struct {
indexType *string indexType *string
fixJpgOrientation *bool fixJpgOrientation *bool
readRedirect *bool readRedirect *bool
enableBytesCache *bool
} }
func init() { func init() {
@@ -54,13 +55,13 @@ func init() {
v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb] mode for memory~performance balance.") v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb] mode for memory~performance balance.")
v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", true, "Adjust jpg orientation when uploading.") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", true, "Adjust jpg orientation when uploading.")
v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
v.enableBytesCache = cmdVolume.Flag.Bool("cache.enable", false, "direct cache instead of OS cache, cost more memory.")
} }
var cmdVolume = &Command{ var cmdVolume = &Command{
UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333", UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333",
Short: "start a volume server", Short: "start a volume server",
Long: `start a volume server to provide storage spaces Long: `start a volume server to provide storage spaces
`, `,
} }
@@ -132,6 +133,7 @@ func runVolume(cmd *Command, args []string) bool {
*v.master, *v.pulseSeconds, *v.dataCenter, *v.rack, *v.master, *v.pulseSeconds, *v.dataCenter, *v.rack,
v.whiteList, v.whiteList,
*v.fixJpgOrientation, *v.readRedirect, *v.fixJpgOrientation, *v.readRedirect,
*v.enableBytesCache,
) )
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)

4
weed/server/volume_server.go

@@ -33,7 +33,8 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
dataCenter string, rack string, dataCenter string, rack string,
whiteList []string, whiteList []string,
fixJpgOrientation bool, fixJpgOrientation bool,
readRedirect bool) *VolumeServer {
readRedirect bool,
enableBytesCache bool) *VolumeServer {
vs := &VolumeServer{ vs := &VolumeServer{
pulseSeconds: pulseSeconds, pulseSeconds: pulseSeconds,
dataCenter: dataCenter, dataCenter: dataCenter,
@@ -44,6 +45,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
} }
vs.SetMasterNode(masterNode) vs.SetMasterNode(masterNode)
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)
storage.EnableBytesCache = enableBytesCache
vs.guard = security.NewGuard(whiteList, "") vs.guard = security.NewGuard(whiteList, "")

7
weed/storage/needle_byte_cache.go

@@ -11,15 +11,14 @@ import (
) )
var ( var (
EnableBytesCache = true
bytesCache *lru.Cache bytesCache *lru.Cache
bytesPool *util.BytesPool bytesPool *util.BytesPool
) )
/* /*
There are one level of caching, and one level of pooling. There are one level of caching, and one level of pooling.
In pooling, all []byte are fetched and returned to the pool bytesPool. In pooling, all []byte are fetched and returned to the pool bytesPool.
In caching, the string~[]byte mapping is cached In caching, the string~[]byte mapping is cached
*/ */
func init() { func init() {
@@ -48,12 +47,14 @@ func (block *Block) increaseReference() {
func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) { func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
// check cache, return if found // check cache, return if found
cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize) cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
if EnableBytesCache {
if obj, found := bytesCache.Get(cacheKey); found { if obj, found := bytesCache.Get(cacheKey); found {
block = obj.(*Block) block = obj.(*Block)
block.increaseReference() block.increaseReference()
dataSlice = block.Bytes[0:readSize] dataSlice = block.Bytes[0:readSize]
return dataSlice, block, nil return dataSlice, block, nil
} }
}
// get the []byte from pool // get the []byte from pool
b := bytesPool.Get(readSize) b := bytesPool.Get(readSize)
@@ -61,7 +62,9 @@ func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []b
block = &Block{Bytes: b, refCount: 2} block = &Block{Bytes: b, refCount: 2}
dataSlice = block.Bytes[0:readSize] dataSlice = block.Bytes[0:readSize]
_, err = r.ReadAt(dataSlice, offset) _, err = r.ReadAt(dataSlice, offset)
if EnableBytesCache {
bytesCache.Add(cacheKey, block) bytesCache.Add(cacheKey, block)
}
return dataSlice, block, err return dataSlice, block, err
} }

Loading…
Cancel
Save