You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

289 lines
7.5 KiB

7 years ago
6 years ago
5 years ago
7 years ago
6 years ago
7 years ago
7 years ago
6 years ago
7 years ago
7 years ago
6 years ago
6 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
7 years ago
4 years ago
6 years ago
6 years ago
4 years ago
7 years ago
6 years ago
6 years ago
6 years ago
7 years ago
6 years ago
7 years ago
6 years ago
6 years ago
7 years ago
6 years ago
6 years ago
6 years ago
7 years ago
7 years ago
6 years ago
6 years ago
7 years ago
6 years ago
4 years ago
7 years ago
6 years ago
7 years ago
7 years ago
5 years ago
7 years ago
5 years ago
7 years ago
  1. package filer
  2. import (
  3. "fmt"
  4. "hash/fnv"
  5. "math"
  6. "sort"
  7. "sync"
  8. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  9. )
  10. func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
  11. for _, c := range chunks {
  12. t := uint64(c.Offset + int64(c.Size))
  13. if size < t {
  14. size = t
  15. }
  16. }
  17. return
  18. }
  19. func FileSize(entry *filer_pb.Entry) (size uint64) {
  20. return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize)
  21. }
  22. func ETag(entry *filer_pb.Entry) (etag string) {
  23. if entry.Attributes == nil || entry.Attributes.Md5 == nil {
  24. return ETagChunks(entry.Chunks)
  25. }
  26. return fmt.Sprintf("%x", entry.Attributes.Md5)
  27. }
  28. func ETagEntry(entry *Entry) (etag string) {
  29. if entry.Attr.Md5 == nil {
  30. return ETagChunks(entry.Chunks)
  31. }
  32. return fmt.Sprintf("%x", entry.Attr.Md5)
  33. }
  34. func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
  35. if len(chunks) == 1 {
  36. return chunks[0].ETag
  37. }
  38. h := fnv.New32a()
  39. for _, c := range chunks {
  40. h.Write([]byte(c.ETag))
  41. }
  42. return fmt.Sprintf("%x", h.Sum32())
  43. }
  44. func CompactFileChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
  45. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
  46. fileIds := make(map[string]bool)
  47. for _, interval := range visibles {
  48. fileIds[interval.fileId] = true
  49. }
  50. for _, chunk := range chunks {
  51. if _, found := fileIds[chunk.GetFileIdString()]; found {
  52. compacted = append(compacted, chunk)
  53. } else {
  54. garbage = append(garbage, chunk)
  55. }
  56. }
  57. return
  58. }
  59. func MinusChunks(lookupFileIdFn LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
  60. aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
  61. if aErr != nil {
  62. return nil, aErr
  63. }
  64. bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
  65. if bErr != nil {
  66. return nil, bErr
  67. }
  68. delta = append(delta, DoMinusChunks(aData, bData)...)
  69. delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
  70. return
  71. }
  72. func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  73. fileIds := make(map[string]bool)
  74. for _, interval := range bs {
  75. fileIds[interval.GetFileIdString()] = true
  76. }
  77. for _, chunk := range as {
  78. if _, found := fileIds[chunk.GetFileIdString()]; !found {
  79. delta = append(delta, chunk)
  80. }
  81. }
  82. return
  83. }
// ChunkView describes the portion of a single stored chunk that is visible
// at a particular logical range of a file.
type ChunkView struct {
	FileId      string // id of the chunk that backs this view
	Offset      int64  // starting offset of the viewed data within the chunk
	Size        uint64 // number of bytes covered by this view
	LogicOffset int64  // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize   uint64 // total size of the backing chunk
	CipherKey   []byte // encryption key for the chunk, if any
	IsGzipped   bool   // whether the chunk content is compressed
}
  93. func (cv *ChunkView) IsFullChunk() bool {
  94. return cv.Size == cv.ChunkSize
  95. }
  96. func ViewFromChunks(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
  97. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
  98. return ViewFromVisibleIntervals(visibles, offset, size)
  99. }
  100. func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
  101. stop := offset + size
  102. if size == math.MaxInt64 {
  103. stop = math.MaxInt64
  104. }
  105. if stop < offset {
  106. stop = math.MaxInt64
  107. }
  108. for _, chunk := range visibles {
  109. chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
  110. if chunkStart < chunkStop {
  111. views = append(views, &ChunkView{
  112. FileId: chunk.fileId,
  113. Offset: chunkStart - chunk.start + chunk.chunkOffset,
  114. Size: uint64(chunkStop - chunkStart),
  115. LogicOffset: chunkStart,
  116. ChunkSize: chunk.chunkSize,
  117. CipherKey: chunk.cipherKey,
  118. IsGzipped: chunk.isGzipped,
  119. })
  120. }
  121. }
  122. return views
  123. }
// logPrintf is a debugging hook that would dump a labeled list of visible
// intervals; its body is intentionally commented out to keep production
// logs quiet. Re-enable locally when tracing interval merges.
func logPrintf(name string, visibles []VisibleInterval) {
	/*
		glog.V(0).Infof("%s len %d", name, len(visibles))
		for _, v := range visibles {
			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
		}
	*/
}
// bufPool recycles VisibleInterval values.
// NOTE(review): nothing in this file Gets or Puts from this pool — verify it
// is still used elsewhere in the package (or by an out-of-view section of
// this file) before relying on it; the name suggests a buffer pool but it
// holds *VisibleInterval.
var bufPool = sync.Pool{
	New: func() interface{} {
		return new(VisibleInterval)
	},
}
// MergeIntoVisibles merges one chunk into a sorted list of non-overlapping
// visible intervals. Callers (NonOverlappingVisibleIntervals) feed chunks in
// ascending Mtime order, so the incoming chunk is treated as newer: its
// [Offset, Offset+Size) range shadows any interval it overlaps, and overlapped
// intervals are trimmed down to their non-overlapping remainders.
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
	// Interval for the incoming chunk; chunkOffset 0 because the whole chunk
	// is visible at this point.
	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
	length := len(visibles)
	if length == 0 {
		return append(visibles, newV)
	}
	last := visibles[length-1]
	// Fast path: the chunk begins at or after the last interval's end, so it
	// can be appended directly while keeping the list sorted.
	if last.stop <= chunk.Offset {
		return append(visibles, newV)
	}
	logPrintf(" before", visibles)
	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
	chunkStop := chunk.Offset + int64(chunk.Size)
	for _, v := range visibles {
		// Left remainder: v starts before the chunk and extends into it.
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
		}
		// Right remainder: v extends past the chunk's end; its chunkOffset is
		// advanced by the number of bytes shadowed at its front.
		if v.start < chunkStop && chunkStop < v.stop {
			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
		}
		// Disjoint interval: keep it unchanged. (An interval fully covered by
		// the chunk matches none of the three conditions and is dropped.)
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			newVisibles = append(newVisibles, v)
		}
	}
	newVisibles = append(newVisibles, newV)
	logPrintf(" append", newVisibles)
	// Insertion-sort step: newV was appended at the end; shift earlier
	// intervals right until newV sits in ascending start order.
	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}
	logPrintf(" sorted", newVisibles)
	return newVisibles
}
  179. // NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
  180. // If the file chunk content is a chunk manifest
  181. func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
  182. chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
  183. sort.Slice(chunks, func(i, j int) bool {
  184. if chunks[i].Mtime == chunks[j].Mtime {
  185. filer_pb.EnsureFid(chunks[i])
  186. filer_pb.EnsureFid(chunks[j])
  187. if chunks[i].Fid == nil || chunks[j].Fid == nil {
  188. return true
  189. }
  190. return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
  191. }
  192. return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run
  193. })
  194. for _, chunk := range chunks {
  195. // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
  196. visibles = MergeIntoVisibles(visibles, chunk)
  197. logPrintf("add", visibles)
  198. }
  199. return
  200. }
// VisibleInterval maps one non-overlapping, currently-visible byte range of
// a file to the single chunk that provides its data.
type VisibleInterval struct {
	start        int64  // inclusive logical file offset where the interval begins
	stop         int64  // exclusive logical file offset where the interval ends
	modifiedTime int64  // Mtime of the chunk backing this interval
	fileId       string // id of the backing chunk
	chunkOffset  int64  // starting position of this interval's data within the chunk
	chunkSize    uint64 // total size of the backing chunk
	cipherKey    []byte // encryption key for the chunk, if any
	isGzipped    bool   // whether the chunk content is compressed
}
  213. func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
  214. return VisibleInterval{
  215. start: start,
  216. stop: stop,
  217. fileId: fileId,
  218. modifiedTime: modifiedTime,
  219. chunkOffset: chunkOffset, // the starting position in the chunk
  220. chunkSize: chunkSize,
  221. cipherKey: cipherKey,
  222. isGzipped: isGzipped,
  223. }
  224. }
  225. func min(x, y int64) int64 {
  226. if x <= y {
  227. return x
  228. }
  229. return y
  230. }
  231. func max(x, y int64) int64 {
  232. if x <= y {
  233. return y
  234. }
  235. return x
  236. }