You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

157 lines
4.7 KiB

5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
  1. package filer2
  2. import (
  3. "context"
  4. "fmt"
  5. "io"
  6. "sync"
  7. "github.com/chrislusf/seaweedfs/weed/glog"
  8. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  9. "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
  10. "github.com/chrislusf/seaweedfs/weed/wdclient"
  11. )
  12. type ChunkReadAt struct {
  13. masterClient *wdclient.MasterClient
  14. chunkViews []*ChunkView
  15. buffer []byte
  16. bufferOffset int64
  17. lookupFileId func(fileId string) (targetUrl string, err error)
  18. readerLock sync.Mutex
  19. fileSize int64
  20. chunkCache *chunk_cache.ChunkCache
  21. }
  22. // var _ = io.ReaderAt(&ChunkReadAt{})
  23. type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error)
  24. func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
  25. return func(fileId string) (targetUrl string, err error) {
  26. err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  27. vid := VolumeId(fileId)
  28. resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
  29. VolumeIds: []string{vid},
  30. })
  31. if err != nil {
  32. return err
  33. }
  34. locations := resp.LocationsMap[vid]
  35. if locations == nil || len(locations.Locations) == 0 {
  36. glog.V(0).Infof("failed to locate %s", fileId)
  37. return fmt.Errorf("failed to locate %s", fileId)
  38. }
  39. volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
  40. targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
  41. return nil
  42. })
  43. return
  44. }
  45. }
  46. func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
  47. return &ChunkReadAt{
  48. chunkViews: chunkViews,
  49. lookupFileId: LookupFn(filerClient),
  50. bufferOffset: -1,
  51. chunkCache: chunkCache,
  52. fileSize: fileSize,
  53. }
  54. }
  55. func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
  56. c.readerLock.Lock()
  57. defer c.readerLock.Unlock()
  58. for n < len(p) && err == nil {
  59. readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
  60. n += readCount
  61. err = readErr
  62. }
  63. return
  64. }
  65. func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
  66. var found bool
  67. var chunkStart, chunkStop int64
  68. for _, chunk := range c.chunkViews {
  69. // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d), %v && %v\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size), chunk.LogicOffset <= offset, offset < chunk.LogicOffset+int64(chunk.Size))
  70. chunkStart, chunkStop = max(chunk.LogicOffset, offset), min(chunk.LogicOffset+int64(chunk.Size), offset+int64(len(p)))
  71. if chunkStart < chunkStop {
  72. found = true
  73. if c.bufferOffset != chunk.LogicOffset {
  74. c.buffer, err = c.fetchChunkData(chunk)
  75. if err != nil {
  76. glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
  77. }
  78. c.bufferOffset = chunk.LogicOffset
  79. }
  80. break
  81. }
  82. }
  83. // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d), found:%v, err:%v\n", offset, offset+int64(len(p)), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), found, err)
  84. if err != nil {
  85. return
  86. }
  87. if found {
  88. n = int(chunkStart-offset) + copy(p[chunkStart-offset:chunkStop-offset], c.buffer[chunkStart-c.bufferOffset:chunkStop-c.bufferOffset])
  89. return
  90. }
  91. n = len(p)
  92. if offset+int64(n) >= c.fileSize {
  93. err = io.EOF
  94. n = int(c.fileSize - offset)
  95. }
  96. return
  97. }
  98. func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
  99. glog.V(5).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  100. hasDataInCache := false
  101. chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
  102. if chunkData != nil {
  103. glog.V(5).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  104. hasDataInCache = true
  105. } else {
  106. chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
  107. if err != nil {
  108. return nil, err
  109. }
  110. }
  111. if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
  112. glog.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
  113. return nil, fmt.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
  114. }
  115. data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
  116. if !hasDataInCache {
  117. c.chunkCache.SetChunk(chunkView.FileId, chunkData)
  118. }
  119. return data, nil
  120. }
// doFetchFullChunkData downloads the complete chunk identified by fileId,
// resolving its location via c.lookupFileId. cipherKey and isGzipped describe
// how the stored bytes must be decrypted/decompressed; the actual transfer is
// delegated to the package-level fetchChunk helper (defined elsewhere in this
// package — presumably it returns the fully decoded chunk bytes; confirm there).
func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
	return fetchChunk(c.lookupFileId, fileId, cipherKey, isGzipped)
}