
commit 18afdb15b6 (pull/5976/head)
Author: chrislu, 4 months ago

Revert "weed mount, weed dav add option to force cache"

This reverts commit 7367b976b0.
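The reverted change had introduced a forceCache option: a -forceCache flag on weed mount (default true, "force to cache all reads") and on weed webdav (default false, "force to cache reads to local disk"), threaded through ChunkReadAt, ReaderCache, TieredChunkCache, and the WebDAV server. Reverting removes the flag, drops the trailing forceCache parameter from NewTieredChunkCache, and restores the previous read-caching behavior; see the sketch after the file list.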
9 changed files:

 weed/command/mount.go                             | 2
 weed/command/mount_std.go                         | 1
 weed/command/webdav.go                            | 3
 weed/filer/reader_at.go                           | 5
 weed/filer/reader_cache.go                        | 4
 weed/mount/weedfs.go                              | 3
 weed/server/webdav_server.go                      | 3
 weed/util/chunk_cache/chunk_cache.go              | 6
 weed/util/chunk_cache/chunk_cache_on_disk_test.go | 4
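A minimal sketch, assuming the seaweedfs module is on the import path, of what the constructor change means for callers: after the revert, NewTieredChunkCache takes four arguments, with no trailing forceCache bool. The directory and sizes here are illustrative, not taken from the diff.

package main

import (
	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
)

func main() {
	// Post-revert signature: NewTieredChunkCache(maxEntries, dir, diskSizeInUnit, unitSize).
	// Mirrors the call sites in weedfs.go and webdav_server.go below: 256 in-memory
	// entries, an on-disk cache directory, and 1 MiB cache units.
	cache := chunk_cache.NewTieredChunkCache(256, "/tmp/weed-cache", 1024, 1024*1024)
	defer cache.Shutdown() // Shutdown also appears in the updated test at the bottom
}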

weed/command/mount.go

@@ -20,7 +20,6 @@ type MountOptions struct {
 	cacheDirForRead    *string
 	cacheDirForWrite   *string
 	cacheSizeMBForRead *int64
-	forceCache         *bool
 	dataCenter         *string
 	allowOthers        *bool
 	umaskString        *string
@@ -59,7 +58,6 @@ func init() {
 	mountOptions.cacheDirForRead = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
 	mountOptions.cacheSizeMBForRead = cmdMount.Flag.Int64("cacheCapacityMB", 0, "file chunk read cache capacity in MB")
 	mountOptions.cacheDirForWrite = cmdMount.Flag.String("cacheDirWrite", "", "buffer writes mostly for large files")
-	mountOptions.forceCache = cmdMount.Flag.Bool("forceCache", true, "force to cache all reads")
 	mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
 	mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
 	mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")

weed/command/mount_std.go

@@ -235,7 +235,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		CacheDirForRead:    *option.cacheDirForRead,
 		CacheSizeMBForRead: *option.cacheSizeMBForRead,
 		CacheDirForWrite:   cacheDirForWrite,
-		ForceCache:         *option.forceCache,
 		DataCenter:         *option.dataCenter,
 		Quota:              int64(*option.collectionQuota) * 1024 * 1024,
 		MountUid:           uid,

weed/command/webdav.go

@@ -32,7 +32,6 @@ type WebDavOption struct {
 	tlsCertificate *string
 	cacheDir       *string
 	cacheSizeMB    *int64
-	forceCache     *bool
 	maxMB          *int
 }
@@ -47,7 +46,6 @@ func init() {
 	webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
 	webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
 	webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 0, "local cache capacity in MB")
-	webDavStandaloneOptions.forceCache = cmdWebDav.Flag.Bool("forceCache", false, "force to cache reads to local disk")
 	webDavStandaloneOptions.maxMB = cmdWebDav.Flag.Int("maxMB", 4, "split files larger than the limit")
 	webDavStandaloneOptions.filerRootPath = cmdWebDav.Flag.String("filer.path", "/", "use this remote path from filer server")
 }
@@ -120,7 +118,6 @@ func (wo *WebDavOption) startWebDav() bool {
 		Cipher:      cipher,
 		CacheDir:    util.ResolvePath(*wo.cacheDir),
 		CacheSizeMB: *wo.cacheSizeMB,
-		ForceCache:  *wo.forceCache,
 		MaxMB:       *wo.maxMB,
 	})
 	if webdavServer_err != nil {

weed/filer/reader_at.go

@@ -19,7 +19,6 @@ type ChunkReadAt struct {
 	fileSize      int64
 	readerCache   *ReaderCache
 	readerPattern *ReaderPattern
-	forceCache    bool
 	lastChunkFid  string
 }
@@ -197,9 +196,7 @@ func (c *ChunkReadAt) readChunkSliceAt(buffer []byte, chunkView *ChunkView, next
 		if n > 0 {
 			return n, err
 		}
-		if !c.forceCache {
-			return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
-		}
+		return fetchChunkRange(buffer, c.readerCache.lookupFileIdFn, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset))
 	}

 	n, err = c.readerCache.ReadChunkAt(buffer, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int64(offset), int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
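For clarity, a standalone sketch of the read path this hunk restores for random-mode reads: on a local cache miss the reader now always falls through to a remote range fetch, with no forceCache check. readFromCache and fetchRange are illustrative stand-ins for chunkCache.ReadChunkAt and fetchChunkRange, not the real functions.

package main

import "fmt"

// readFromCache stands in for the local chunk cache lookup; returning 0 simulates a miss.
func readFromCache(buf []byte, fileId string, offset uint64) int { return 0 }

// fetchRange stands in for fetchChunkRange, which reads the byte range from a volume server.
func fetchRange(buf []byte, fileId string, offset uint64) (int, error) {
	return copy(buf, "chunk bytes fetched remotely"), nil
}

// readChunkSlice mirrors the post-revert logic: try the cache, then fetch unconditionally.
func readChunkSlice(buf []byte, fileId string, offset uint64) (int, error) {
	if n := readFromCache(buf, fileId, offset); n > 0 {
		return n, nil
	}
	return fetchRange(buf, fileId, offset)
}

func main() {
	buf := make([]byte, 16)
	n, err := readChunkSlice(buf, "3,01637037d6", 0)
	fmt.Println(n, err) // 16 <nil>
}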

weed/filer/reader_cache.go

@@ -7,9 +7,9 @@ import (
 	"time"

 	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
+	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 	"github.com/seaweedfs/seaweedfs/weed/util/mem"
 	"github.com/seaweedfs/seaweedfs/weed/wdclient"
-	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
 )

 type ReaderCache struct {
@@ -69,7 +69,7 @@ func (rc *ReaderCache) MaybeCache(chunkViews *Interval[*ChunkView]) {
 		// glog.V(4).Infof("prefetch %s offset %d", chunkView.FileId, chunkView.ViewOffset)
 		// cache this chunk if not yet
-		cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), chunkView.ViewOffset == 0)
+		cacher := newSingleChunkCacher(rc, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped, int(chunkView.ChunkSize), false)
 		go cacher.startCaching()
 		<-cacher.cacheStartedCh
 		rc.downloaders[chunkView.FileId] = cacher
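What this hunk changes, in isolation: the last argument to newSingleChunkCacher is a hint about whether the prefetched chunk should be written through to the local chunk cache (the parameter name is not visible in the diff, so treat it as an assumption). Before the revert the hint was true for a file's head chunk (ViewOffset == 0); after the revert it is always false. A runnable toy showing both behaviors:

package main

import "fmt"

// shouldCacheOnPrefetch models the argument passed to newSingleChunkCacher
// in MaybeCache; "reverted" selects which behavior to show.
func shouldCacheOnPrefetch(viewOffset int64, reverted bool) bool {
	if reverted {
		return false // after the revert: never force-cache prefetched chunks
	}
	return viewOffset == 0 // before the revert: cache the head chunk of a file
}

func main() {
	fmt.Println(shouldCacheOnPrefetch(0, false)) // true
	fmt.Println(shouldCacheOnPrefetch(0, true))  // false
}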

weed/mount/weedfs.go

@@ -41,7 +41,6 @@ type Option struct {
 	CacheDirForRead    string
 	CacheSizeMBForRead int64
 	CacheDirForWrite   string
-	ForceCache         bool
 	DataCenter         string
 	Umask              os.FileMode
 	Quota              int64
@@ -96,7 +95,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 	wfs.option.filerIndex = int32(rand.Intn(len(option.FilerAddresses)))
 	wfs.option.setupUniqueCacheDirectory()
 	if option.CacheSizeMBForRead > 0 {
-		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024, option.ForceCache)
+		wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, option.getUniqueCacheDirForRead(), option.CacheSizeMBForRead, 1024*1024)
 	}
 	wfs.metaCache = meta_cache.NewMetaCache(path.Join(option.getUniqueCacheDirForRead(), "meta"), option.UidGidMapper,

weed/server/webdav_server.go

@@ -38,7 +38,6 @@ type WebDavOption struct {
 	Cipher      bool
 	CacheDir    string
 	CacheSizeMB int64
-	ForceCache  bool
 	MaxMB       int
 }
@@ -134,7 +133,7 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
 	cacheDir := path.Join(option.CacheDir, cacheUniqueId)
 	os.MkdirAll(cacheDir, os.FileMode(0755))
-	chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024, option.ForceCache)
+	chunkCache := chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024)
 	t := &WebDavFileSystem{
 		option:     option,
 		chunkCache: chunkCache,

weed/util/chunk_cache/chunk_cache.go

@@ -23,16 +23,14 @@ type TieredChunkCache struct {
 	onDiskCacheSizeLimit0 uint64
 	onDiskCacheSizeLimit1 uint64
 	onDiskCacheSizeLimit2 uint64
-	forceCache            bool
 }

 var _ ChunkCache = &TieredChunkCache{}

-func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64, forceCache bool) *TieredChunkCache {
+func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
 	c := &TieredChunkCache{
-		memCache:   NewChunkCacheInMemory(maxEntries),
-		forceCache: forceCache,
+		memCache: NewChunkCacheInMemory(maxEntries),
 	}
 	c.diskCaches = make([]*OnDiskCacheLayer, 3)
 	c.onDiskCacheSizeLimit0 = uint64(unitSize)

weed/util/chunk_cache/chunk_cache_on_disk_test.go

@@ -13,7 +13,7 @@ func TestOnDisk(t *testing.T) {
 	totalDiskSizeInKB := int64(32)

-	cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
+	cache := NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)

 	writeCount := 5

 	type test_data struct {
@@ -61,7 +61,7 @@ func TestOnDisk(t *testing.T) {
 	cache.Shutdown()

-	cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024, false)
+	cache = NewTieredChunkCache(2, tmpDir, totalDiskSizeInKB, 1024)

 	for i := 0; i < 2; i++ {
 		data := mem.Allocate(testData[i].size)
