package filer2

import (
	"fmt"
	"hash/fnv"
	"math"
	"sort"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
// TotalSize returns the logical file size: the maximum extent
// (offset + size) over all chunks. Chunks may overlap after updates,
// so their sizes are not simply summed.
func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
	for _, c := range chunks {
		t := uint64(c.Offset + int64(c.Size))
		if size < t {
			size = t
		}
	}
	return
}
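
// Hypothetical illustration of TotalSize: chunks may overlap after updates,
// so the logical size is the farthest extent, not the sum of chunk sizes.
// The chunk values below are made up for the example.
func exampleTotalSize() uint64 {
	chunks := []*filer_pb.FileChunk{
		{Offset: 0, Size: 100},  // covers [0, 100)
		{Offset: 50, Size: 150}, // covers [50, 200), overlapping the first
	}
	return TotalSize(chunks) // 200, the farthest extent
}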
// ETag returns the entry's ETag, preferring the stored MD5 attribute and
// falling back to a hash derived from the chunk ETags.
func ETag(entry *filer_pb.Entry) (etag string) {
	if entry.Attributes == nil || entry.Attributes.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attributes.Md5)
}

// ETagEntry is the same as ETag, but for the in-memory Entry type.
func ETagEntry(entry *Entry) (etag string) {
	if entry.Attr.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attr.Md5)
}

// ETagChunks computes an ETag from the chunk list: a single chunk's ETag is
// used directly; multiple chunk ETags are combined with an FNV-32a hash.
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
	if len(chunks) == 1 {
		return chunks[0].ETag
	}
	h := fnv.New32a()
	for _, c := range chunks {
		h.Write([]byte(c.ETag))
	}
	return fmt.Sprintf("%x", h.Sum32())
}
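
// Hypothetical illustration of ETagChunks: with a single chunk the chunk's
// own ETag is returned unchanged; with several chunks the result is an
// FNV-32a hash over the concatenated chunk ETags, so it is not an MD5 of
// the file content. The ETag values below are made up for the example.
func exampleETagChunks() string {
	chunks := []*filer_pb.FileChunk{
		{ETag: "abc123"},
		{ETag: "def456"},
	}
	return ETagChunks(chunks) // hex of fnv32a("abc123" + "def456")
}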
// CompactFileChunks partitions chunks into those still visible after all
// overwrites (compacted) and those fully shadowed by newer writes (garbage).
func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {

	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)

	fileIds := make(map[string]bool)
	for _, interval := range visibles {
		fileIds[interval.fileId] = true
	}
	for _, chunk := range chunks {
		if _, found := fileIds[chunk.GetFileIdString()]; found {
			compacted = append(compacted, chunk)
		} else {
			garbage = append(garbage, chunk)
		}
	}

	return
}
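
// Hypothetical illustration of CompactFileChunks: a newer chunk that fully
// covers an older one leaves the older chunk invisible, so it comes back as
// garbage. The file ids and times are made up; passing a nil lookup function
// assumes none of the chunks is a chunk manifest, so it is never called.
func exampleCompactFileChunks() (compacted, garbage []*filer_pb.FileChunk) {
	chunks := []*filer_pb.FileChunk{
		{FileId: "1,a", Offset: 0, Size: 100, Mtime: 1}, // fully overwritten below
		{FileId: "2,b", Offset: 0, Size: 100, Mtime: 2},
	}
	return CompactFileChunks(nil, chunks) // compacted: "2,b", garbage: "1,a"
}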
// MinusChunks returns the chunks of as that are not present in bs, resolving
// chunk manifests on both sides before comparing.
func MinusChunks(lookupFileIdFn LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {

	aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
	if aErr != nil {
		return nil, aErr
	}
	bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
	if bErr != nil {
		return nil, bErr
	}

	delta = append(delta, DoMinusChunks(aData, bData)...)
	delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
	return
}

// DoMinusChunks returns the chunks of as whose file ids do not appear in bs.
func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {

	fileIds := make(map[string]bool)
	for _, interval := range bs {
		fileIds[interval.GetFileIdString()] = true
	}
	for _, chunk := range as {
		if _, found := fileIds[chunk.GetFileIdString()]; !found {
			delta = append(delta, chunk)
		}
	}

	return
}
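
// Hypothetical illustration of DoMinusChunks: a plain set difference keyed on
// file id, e.g. to find chunks an old entry holds that a new entry no longer
// references. The file ids are made up for the example.
func exampleDoMinusChunks() []*filer_pb.FileChunk {
	as := []*filer_pb.FileChunk{{FileId: "1,a"}, {FileId: "2,b"}}
	bs := []*filer_pb.FileChunk{{FileId: "2,b"}}
	return DoMinusChunks(as, bs) // only "1,a" remains
}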
// ChunkView describes the portion of one stored chunk that backs a
// contiguous range of the logical file.
type ChunkView struct {
	FileId      string
	Offset      int64 // offset within the chunk
	Size        uint64
	LogicOffset int64 // offset within the logical file
	ChunkSize   uint64
	CipherKey   []byte
	IsGzipped   bool
}

// IsFullChunk reports whether the view covers the entire chunk.
func (cv *ChunkView) IsFullChunk() bool {
	return cv.Size == cv.ChunkSize
}

// ViewFromChunks resolves the chunks into non-overlapping visible intervals
// and returns the chunk views needed to read [offset, offset+size).
func ViewFromChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
	return ViewFromVisibleIntervals(visibles, offset, size)
}
// ViewFromVisibleIntervals maps the byte range [offset, offset+size) onto the
// visible intervals, producing one ChunkView per interval touched.
func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {

	stop := offset + size
	// Guard against overflow when size means "read to the end".
	if size == math.MaxInt64 {
		stop = math.MaxInt64
	}
	if stop < offset {
		stop = math.MaxInt64
	}

	for _, chunk := range visibles {
		if chunk.start <= offset && offset < chunk.stop && offset < stop {
			views = append(views, &ChunkView{
				FileId:      chunk.fileId,
				Offset:      offset - chunk.start, // offset is the data starting location in this file id
				Size:        uint64(min(chunk.stop, stop) - offset),
				LogicOffset: offset,
				ChunkSize:   chunk.chunkSize,
				CipherKey:   chunk.cipherKey,
				IsGzipped:   chunk.isGzipped,
			})
			offset = min(chunk.stop, stop)
		}
	}

	return views
}
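
// Hypothetical illustration of ViewFromVisibleIntervals: reading 100 bytes at
// offset 150 from a file backed by two 100-byte intervals yields one view per
// interval touched. The intervals below are made up for the example.
func exampleViewFromVisibleIntervals() []*ChunkView {
	visibles := []VisibleInterval{
		{start: 100, stop: 200, fileId: "1,a", chunkSize: 100},
		{start: 200, stop: 300, fileId: "2,b", chunkSize: 100},
	}
	// First view:  FileId "1,a", Offset 50, Size 50, LogicOffset 150
	// Second view: FileId "2,b", Offset 0,  Size 50, LogicOffset 200
	return ViewFromVisibleIntervals(visibles, 150, 100)
}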
func logPrintf(name string, visibles []VisibleInterval) {
	/*
		log.Printf("%s len %d", name, len(visibles))
		for _, v := range visibles {
			log.Printf("%s: => %+v", name, v)
		}
	*/
}

var bufPool = sync.Pool{
	New: func() interface{} {
		return new(VisibleInterval)
	},
}
// MergeIntoVisibles overlays chunk on the existing visible intervals: the new
// chunk wins over any older data it overlaps, and a partially overlapped
// interval is trimmed to its uncovered prefix and/or suffix. The result is
// written into newVisibles and kept sorted by start offset.
func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval {

	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

	length := len(visibles)
	if length == 0 {
		return append(visibles, newV)
	}
	// Fast path: the new chunk starts after all existing intervals.
	last := visibles[length-1]
	if last.stop <= chunk.Offset {
		return append(visibles, newV)
	}

	logPrintf(" before", visibles)
	for _, v := range visibles {
		// Keep the prefix of v that the new chunk does not cover.
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
		}
		chunkStop := chunk.Offset + int64(chunk.Size)
		// Keep the suffix of v that the new chunk does not cover.
		if v.start < chunkStop && chunkStop < v.stop {
			newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped))
		}
		// Keep v untouched when it does not overlap the new chunk at all.
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			newVisibles = append(newVisibles, v)
		}
	}
	newVisibles = append(newVisibles, newV)

	logPrintf(" append", newVisibles)

	// Insertion-sort the newly appended interval back into start order.
	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}

	logPrintf(" sorted", newVisibles)

	return newVisibles
}
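
// Hypothetical illustration of MergeIntoVisibles: overlaying a newer chunk
// [50, 150) on an existing interval [0, 100) keeps only the old interval's
// uncovered prefix [0, 50) and then adds the new interval. Values are made up.
func exampleMergeIntoVisibles() []VisibleInterval {
	visibles := []VisibleInterval{{start: 0, stop: 100, fileId: "1,a"}}
	chunk := &filer_pb.FileChunk{FileId: "2,b", Offset: 50, Size: 100, Mtime: 2}
	// Result: [0, 50) backed by "1,a", then [50, 150) backed by "2,b".
	return MergeIntoVisibles(visibles, nil, chunk)
}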
// NonOverlappingVisibleIntervals translates the file chunks into in-memory
// VisibleIntervals. If a file chunk is a chunk manifest, it is first resolved
// into the data chunks it references. Chunks are applied in Mtime order, so
// later writes shadow earlier ones.
func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {

	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)

	sort.Slice(chunks, func(i, j int) bool {
		return chunks[i].Mtime < chunks[j].Mtime
	})

	var newVisibles []VisibleInterval
	for _, chunk := range chunks {
		newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk)
		// Swap the two slices so the next merge reuses the old backing array.
		t := visibles[:0]
		visibles = newVisibles
		newVisibles = t

		logPrintf("add", visibles)
	}

	return
}
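
// Hypothetical illustration of NonOverlappingVisibleIntervals: chunks are
// sorted by Mtime before merging, so the latest write wins even when the
// slice lists it first. Values are made up; the nil lookup function assumes
// no chunk is a chunk manifest.
func exampleNonOverlappingVisibleIntervals() ([]VisibleInterval, error) {
	chunks := []*filer_pb.FileChunk{
		{FileId: "2,b", Offset: 0, Size: 100, Mtime: 2}, // newer, listed first
		{FileId: "1,a", Offset: 0, Size: 100, Mtime: 1},
	}
	return NonOverlappingVisibleIntervals(nil, chunks) // one interval, "2,b"
}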
// VisibleInterval is a non-overlapping visible interval of the logical file;
// each visible interval maps to (part of) one file chunk.
type VisibleInterval struct {
	start        int64
	stop         int64
	modifiedTime int64
	fileId       string
	chunkSize    uint64
	cipherKey    []byte
	isGzipped    bool
}

func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
	return VisibleInterval{
		start:        start,
		stop:         stop,
		fileId:       fileId,
		modifiedTime: modifiedTime,
		chunkSize:    chunkSize,
		cipherKey:    cipherKey,
		isGzipped:    isGzipped,
	}
}
func min(x, y int64) int64 {
	if x <= y {
		return x
	}
	return y
}