diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index d1f28520a..e2e628407 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -183,6 +183,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
 		glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err)
 		return nil, fmt.Errorf("upload result: %v", uploadResult.Error)
 	}
+	pages.f.wfs.chunkCache.SetChunk(fileId, data)
 
 	return &filer_pb.FileChunk{
 		FileId: fileId,
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 9c3eb4202..4897d3a08 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -92,7 +92,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
 	if fh.f.reader == nil {
 		chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
-		fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews)
+		fh.f.reader = NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
 	}
 
 	totalRead, err := fh.f.reader.ReadAt(buff, offset)
@@ -153,6 +153,8 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
 		fh.dirtyPages.releaseResource()
 		fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
 	}
+	fh.f.entryViewCache = nil
+	fh.f.reader = nil
 
 	return nil
 }
diff --git a/weed/filer2/reader_at.go b/weed/filesys/reader_at.go
similarity index 72%
rename from weed/filer2/reader_at.go
rename to weed/filesys/reader_at.go
index 06bc17c81..39ec4e0ac 100644
--- a/weed/filer2/reader_at.go
+++ b/weed/filesys/reader_at.go
@@ -1,4 +1,4 @@
-package filer2
+package filesys
 
 import (
 	"bytes"
@@ -7,30 +7,34 @@ import (
 	"io"
 	"sync"
 
+	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 )
 
 type ChunkReadAt struct {
 	masterClient *wdclient.MasterClient
-	chunkViews   []*ChunkView
+	chunkViews   []*filer2.ChunkView
 	buffer       []byte
 	bufferOffset int64
 	lookupFileId func(fileId string) (targetUrl string, err error)
 	readerLock   sync.Mutex
+
+	chunkCache *pb_cache.ChunkCache
 }
 
 // var _ = io.ReaderAt(&ChunkReadAt{})
 
-func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView) *ChunkReadAt {
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*filer2.ChunkView, chunkCache *pb_cache.ChunkCache) *ChunkReadAt {
 	return &ChunkReadAt{
 		chunkViews: chunkViews,
 		lookupFileId: func(fileId string) (targetUrl string, err error) {
 			err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-				vid := VolumeId(fileId)
+				vid := filer2.VolumeId(fileId)
 				resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
 					VolumeIds: []string{vid},
 				})
 				if err != nil {
@@ -61,7 +65,6 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
 
 	c.readerLock.Lock()
 	defer c.readerLock.Unlock()
-
 	for n < len(p) && err == nil {
 		readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
 		n += readCount
@@ -80,7 +83,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
 			found = true
 			if c.bufferOffset != chunk.LogicOffset {
-				c.fetchChunkToBuffer(chunk)
+				c.buffer, err = c.fetchChunkData(chunk)
+				if err != nil {
+					return
+				}
+				c.bufferOffset = chunk.LogicOffset
 			}
 			break
 		}
@@ -97,27 +104,34 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 }
 
-func (c *ChunkReadAt) fetchChunkToBuffer(chunkView *ChunkView) error {
+func (c *ChunkReadAt) fetchChunkData(chunkView *filer2.ChunkView) ([]byte, error) {
 
 	// fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
+	chunkData := c.chunkCache.GetChunk(chunkView.FileId)
+	if chunkData != nil {
+		glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
+		return chunkData, nil
+	}
+
 	urlString, err := c.lookupFileId(chunkView.FileId)
 	if err != nil {
 		glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
-		return err
+		return nil, err
 	}
 	var buffer bytes.Buffer
-	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
+	err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
 		buffer.Write(data)
 	})
 	if err != nil {
 		glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
-		return err
+		return nil, err
 	}
 
-	c.buffer = buffer.Bytes()
-	c.bufferOffset = chunkView.LogicOffset
 	glog.V(3).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
 
-	return nil
+	chunkData = buffer.Bytes()
+	c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+
+	return chunkData, nil
 }
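Note on the read path: `fetchChunkData` is now a read-through cache. It consults the shared chunk cache before contacting a volume server, and it only populates the cache after a successful fetch. Caching by fileId is safe because chunks are immutable in SeaweedFS; an overwritten file gets new chunk ids. A minimal sketch of the pattern (the `readThrough` helper and its `fetch` parameter are illustrative names, not code from this change):

```go
package sketch

import "github.com/chrislusf/seaweedfs/weed/pb/pb_cache"

// readThrough mirrors the control flow of fetchChunkData above:
// hit the cache first, fall back to the network only on a miss.
func readThrough(cache *pb_cache.ChunkCache, fileId string,
	fetch func(fileId string) ([]byte, error)) ([]byte, error) {
	if data := cache.GetChunk(fileId); data != nil {
		return data, nil // cache hit: no volume-server round trip
	}
	data, err := fetch(fileId)
	if err != nil {
		return nil, err // failures are not cached
	}
	cache.SetChunk(fileId, data) // warm the cache for later readers
	return data, nil
}
```

In `fetchChunkData`, `util.ReadUrlAsStream` plays the role of `fetch`, and the write path in dirty_page.go pre-warms the same cache as chunks are uploaded.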
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index 5075687e3..059a0ecc1 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -15,6 +15,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
@@ -62,6 +63,8 @@ type WFS struct {
 	root        fs.Node
 	fsNodeCache *FsCache
+
+	chunkCache *pb_cache.ChunkCache
 }
 
 type statsCache struct {
 	filer_pb.StatisticsResponse
@@ -78,6 +81,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 				return make([]byte, option.ChunkSizeLimit)
 			},
 		},
+		chunkCache: pb_cache.NewChunkCache(),
 	}
 
 	wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
diff --git a/weed/pb/pb_cache/chunk_cache.go b/weed/pb/pb_cache/chunk_cache.go
new file mode 100644
index 000000000..5ea5b17ed
--- /dev/null
+++ b/weed/pb/pb_cache/chunk_cache.go
@@ -0,0 +1,32 @@
+package pb_cache
+
+import (
+	"time"
+
+	"github.com/karlseguin/ccache"
+)
+
+// a global cache for recently accessed file chunks
+type ChunkCache struct {
+	cache *ccache.Cache
+}
+
+func NewChunkCache() *ChunkCache {
+	return &ChunkCache{
+		cache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
+	}
+}
+
+func (c *ChunkCache) GetChunk(fileId string) []byte {
+	item := c.cache.Get(fileId)
+	if item == nil {
+		return nil
+	}
+	data := item.Value().([]byte)
+	item.Extend(time.Hour)
+	return data
+}
+
+func (c *ChunkCache) SetChunk(fileId string, data []byte) {
+	c.cache.Set(fileId, data, time.Hour)
+}
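The new `ChunkCache` is a thin wrapper around karlseguin/ccache: entries live for an hour, each hit extends the TTL, and eviction is LRU. One sizing caveat: if I read ccache correctly, `MaxSize(1000)` counts items rather than bytes (a plain `[]byte` value weighs 1 unless it implements ccache's `Sized` interface), so the worst case is roughly 1000 full chunks held in memory at once. A self-contained usage sketch of the new API (the fileId strings are made-up examples):

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
)

func main() {
	cache := pb_cache.NewChunkCache()

	// Keys are chunk fileIds; values are whole chunk bodies.
	cache.SetChunk("3,01637037d6", []byte("chunk bytes"))

	if data := cache.GetChunk("3,01637037d6"); data != nil {
		fmt.Printf("hit: %d bytes\n", len(data)) // a hit also extends the TTL
	}
	if data := cache.GetChunk("3,deadbeef00"); data == nil {
		fmt.Println("miss") // never-written or evicted fileId
	}
}
```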
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index b3b6e3e8f..95dbef5f8 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -13,9 +13,11 @@ import (
 	"golang.org/x/net/webdav"
 	"google.golang.org/grpc"
 
+	"github.com/chrislusf/seaweedfs/weed/filesys"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
 	"github.com/chrislusf/seaweedfs/weed/util"
 
 	"github.com/chrislusf/seaweedfs/weed/filer2"
@@ -66,6 +68,7 @@ type WebDavFileSystem struct {
 	secret         security.SigningKey
 	filer          *filer2.Filer
 	grpcDialOption grpc.DialOption
+	chunkCache     *pb_cache.ChunkCache
 }
 
 type FileInfo struct {
@@ -95,7 +98,8 @@ type WebDavFile struct {
 
 func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 	return &WebDavFileSystem{
-		option: option,
+		option:     option,
+		chunkCache: pb_cache.NewChunkCache(),
 	}, nil
 }
 
@@ -476,7 +480,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 	}
 	if f.reader == nil {
 		chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
-		f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews)
+		f.reader = filesys.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
 	}
 
 	readSize, err = f.reader.ReadAt(p, f.off)
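Both consumers end up with the same wiring: the FUSE file handle and the WebDAV file each expand their cached visible intervals into `ChunkView`s and pass them, together with the process-wide cache, to the relocated constructor. `f.fs` fits the `filer_pb.FilerClient` parameter because `WebDavFileSystem` implements it, just as `WFS` does on the FUSE side. A sketch of that shape (`newCachedReader` is a hypothetical helper, and the `[]filer2.VisibleInterval` element type is assumed from the `entryViewCache` call sites):

```go
package sketch

import (
	"io"
	"math"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/filesys"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
)

// newCachedReader mirrors the two call sites above: expand the cached
// visible intervals into chunk views, then wrap them in a cache-aware
// ReaderAt that all reads for the file go through.
func newCachedReader(client filer_pb.FilerClient, entryViewCache []filer2.VisibleInterval,
	chunkCache *pb_cache.ChunkCache) io.ReaderAt {
	chunkViews := filer2.ViewFromVisibleIntervals(entryViewCache, 0, math.MaxInt32)
	return filesys.NewChunkReaderAtFromClient(client, chunkViews, chunkCache)
}
```

One design observation: despite the "global cache" comment in chunk_cache.go, each subsystem constructs its own `ChunkCache` (one in `WFS`, one in `WebDavFileSystem`), so a process running both would keep two independent caches.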