You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

150 lines
4.0 KiB

5 years ago
5 years ago
5 years ago
  1. package filer2
  2. import (
  3. "bytes"
  4. "context"
  5. "fmt"
  6. "io"
  7. "sync"
  8. "github.com/chrislusf/seaweedfs/weed/glog"
  9. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  10. "github.com/chrislusf/seaweedfs/weed/util"
  11. "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
  12. "github.com/chrislusf/seaweedfs/weed/wdclient"
  13. )
// ChunkReadAt provides random-access reads (io.ReaderAt-style semantics) over
// a logical file assembled from a list of chunk views. It keeps the most
// recently fetched chunk in an in-memory buffer and consults a shared chunk
// cache before fetching chunk data from a volume server.
type ChunkReadAt struct {
	masterClient *wdclient.MasterClient
	chunkViews   []*ChunkView // views into the file's chunks; scanned to find the one covering a read offset
	buffer       []byte       // data of the most recently fetched chunk view
	bufferOffset int64        // logical file offset of buffer[0]; -1 means "no chunk buffered yet" (set by NewChunkReaderAtFromClient)
	lookupFileId func(fileId string) (targetUrl string, err error) // resolves a chunk fileId to a volume-server URL
	readerLock   sync.Mutex // serializes ReadAt calls: buffer and bufferOffset are shared mutable state
	chunkCache   *chunk_cache.ChunkCache // shared whole-chunk cache; may serve reads without a network fetch
}
  23. // var _ = io.ReaderAt(&ChunkReadAt{})
  24. func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
  25. return &ChunkReadAt{
  26. chunkViews: chunkViews,
  27. lookupFileId: func(fileId string) (targetUrl string, err error) {
  28. err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
  29. vid := VolumeId(fileId)
  30. resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
  31. VolumeIds: []string{vid},
  32. })
  33. if err != nil {
  34. return err
  35. }
  36. locations := resp.LocationsMap[vid]
  37. if locations == nil || len(locations.Locations) == 0 {
  38. glog.V(0).Infof("failed to locate %s", fileId)
  39. return fmt.Errorf("failed to locate %s", fileId)
  40. }
  41. volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url)
  42. targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId)
  43. return nil
  44. })
  45. return
  46. },
  47. bufferOffset: -1,
  48. chunkCache: chunkCache,
  49. }
  50. }
  51. func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
  52. c.readerLock.Lock()
  53. defer c.readerLock.Unlock()
  54. for n < len(p) && err == nil {
  55. readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
  56. n += readCount
  57. err = readErr
  58. if readCount == 0 {
  59. return n, nil
  60. }
  61. }
  62. return
  63. }
  64. func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
  65. var found bool
  66. for _, chunk := range c.chunkViews {
  67. if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
  68. found = true
  69. if c.bufferOffset != chunk.LogicOffset {
  70. c.buffer, err = c.fetchChunkData(chunk)
  71. c.bufferOffset = chunk.LogicOffset
  72. }
  73. break
  74. }
  75. }
  76. if !found {
  77. return 0, io.EOF
  78. }
  79. n = copy(p, c.buffer[offset-c.bufferOffset:])
  80. // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
  81. return
  82. }
  83. func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) {
  84. // fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  85. hasDataInCache := false
  86. chunkData := c.chunkCache.GetChunk(chunkView.FileId)
  87. if chunkData != nil {
  88. glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
  89. hasDataInCache = true
  90. } else {
  91. chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
  92. if err != nil {
  93. return nil, err
  94. }
  95. }
  96. if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
  97. return nil, fmt.Errorf("unexpected larger chunkView [%d,%d) than chunk %d", chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
  98. }
  99. data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
  100. if !hasDataInCache {
  101. c.chunkCache.SetChunk(chunkView.FileId, chunkData)
  102. }
  103. return data, nil
  104. }
  105. func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
  106. urlString, err := c.lookupFileId(fileId)
  107. if err != nil {
  108. glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
  109. return nil, err
  110. }
  111. var buffer bytes.Buffer
  112. err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
  113. buffer.Write(data)
  114. })
  115. if err != nil {
  116. glog.V(1).Infof("read %s failed, err: %v", fileId, err)
  117. return nil, err
  118. }
  119. return buffer.Bytes(), nil
  120. }