9 changed files with 130 additions and 78 deletions
-
21weed/filer2/filechunks.go
-
2weed/filer2/reader_at.go
-
4weed/filer2/stream.go
-
2weed/replication/sink/azuresink/azure_sink.go
-
2weed/replication/sink/b2sink/b2_sink.go
-
2weed/replication/sink/gcssink/gcs_sink.go
-
86weed/util/chunk_cache/chunk_cache.go
-
6weed/util/chunk_cache/chunk_cache_on_disk_test.go
-
83weed/util/chunk_cache/on_disk_cache_layer.go
@ -0,0 +1,83 @@ |
|||
package chunk_cache |
|||
|
|||
import ( |
|||
"fmt" |
|||
"path" |
|||
"sort" |
|||
|
|||
"github.com/chrislusf/seaweedfs/weed/glog" |
|||
"github.com/chrislusf/seaweedfs/weed/storage" |
|||
"github.com/chrislusf/seaweedfs/weed/storage/types" |
|||
) |
|||
|
|||
// OnDiskCacheLayer is one tier of the on-disk chunk cache: a set of
// fixed-size cache volumes kept in newest-first order (index 0 is the
// volume currently being written to).
type OnDiskCacheLayer struct {
	diskCaches []*ChunkCacheVolume
}
|||
|
|||
func NewOnDiskCacheLayer(dir, namePrefix string, volumeCount int, volumeSize int64) *OnDiskCacheLayer{ |
|||
c := &OnDiskCacheLayer{} |
|||
for i := 0; i < volumeCount; i++ { |
|||
fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i)) |
|||
diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024) |
|||
if err != nil { |
|||
glog.Errorf("failed to add cache %s : %v", fileName, err) |
|||
} else { |
|||
c.diskCaches = append(c.diskCaches, diskCache) |
|||
} |
|||
} |
|||
|
|||
// keep newest cache to the front
|
|||
sort.Slice(c.diskCaches, func(i, j int) bool { |
|||
return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime) |
|||
}) |
|||
|
|||
return c |
|||
} |
|||
|
|||
func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) { |
|||
|
|||
if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit { |
|||
t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset() |
|||
if resetErr != nil { |
|||
glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName) |
|||
return |
|||
} |
|||
for i := len(c.diskCaches) - 1; i > 0; i-- { |
|||
c.diskCaches[i] = c.diskCaches[i-1] |
|||
} |
|||
c.diskCaches[0] = t |
|||
} |
|||
|
|||
c.diskCaches[0].WriteNeedle(needleId, data) |
|||
|
|||
} |
|||
|
|||
func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte){ |
|||
|
|||
var err error |
|||
|
|||
for _, diskCache := range c.diskCaches { |
|||
data, err = diskCache.GetNeedle(needleId) |
|||
if err == storage.ErrorNotFound { |
|||
continue |
|||
} |
|||
if err != nil { |
|||
glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId) |
|||
continue |
|||
} |
|||
if len(data) != 0 { |
|||
return |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
|
|||
} |
|||
|
|||
func (c *OnDiskCacheLayer) shutdown(){ |
|||
|
|||
for _, diskCache := range c.diskCaches { |
|||
diskCache.Shutdown() |
|||
} |
|||
|
|||
} |
Write
Preview
Loading…
Cancel
Save
Reference in new issue