package filer

import (
	"bytes"
	"fmt"
	"math"

	"golang.org/x/exp/slices"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"
)
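
// TotalSize returns the logical file size covered by the given chunks,
// i.e. the largest chunk offset plus its size.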
func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
	for _, c := range chunks {
		t := uint64(c.Offset + int64(c.Size))
		if size < t {
			size = t
		}
	}
	return
}
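
// FileSize returns the logical size of an entry: the larger of the recorded
// attribute size and the total size covered by its chunks. If the entry has a
// remote counterpart that was modified more recently, the remote size is
// considered as well.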
func FileSize(entry *filer_pb.Entry) (size uint64) {
	if entry == nil || entry.Attributes == nil {
		return 0
	}
	fileSize := entry.Attributes.FileSize
	if entry.RemoteEntry != nil {
		if entry.RemoteEntry.RemoteMtime > entry.Attributes.Mtime {
			fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
		}
	}
	return maxUint64(TotalSize(entry.Chunks), fileSize)
}
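
// ETag returns the entry's ETag, preferring the precomputed MD5 attribute and
// falling back to an ETag derived from the chunk MD5s.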
func ETag(entry *filer_pb.Entry) (etag string) {
	if entry.Attributes == nil || entry.Attributes.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attributes.Md5)
}
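
// ETagEntry is the in-memory Entry counterpart of ETag.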
func ETagEntry(entry *Entry) (etag string) {
	if entry.Attr.Md5 == nil {
		return ETagChunks(entry.Chunks)
	}
	return fmt.Sprintf("%x", entry.Attr.Md5)
}
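
// ETagChunks derives an ETag from the chunk MD5s: a single chunk yields its own
// MD5, while multiple chunks yield an S3 multipart-style "<md5-of-md5s>-<count>" tag.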
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
	if len(chunks) == 1 {
		return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
	}
	var md5Digests [][]byte
	for _, c := range chunks {
		md5Digests = append(md5Digests, util.Base64Md5ToBytes(c.ETag))
	}
	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
}
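
// CompactFileChunks splits chunks into those still visible in the final file
// (compacted) and those fully shadowed by newer writes (garbage).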
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {

	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)

	fileIds := make(map[string]bool)
	for _, interval := range visibles {
		fileIds[interval.fileId] = true
	}
	for _, chunk := range chunks {
		if _, found := fileIds[chunk.GetFileIdString()]; found {
			compacted = append(compacted, chunk)
		} else {
			garbage = append(garbage, chunk)
		}
	}

	return
}
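
// MinusChunks resolves any chunk manifests in as and bs, then returns the
// resolved chunks of as that do not appear in bs.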
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {

	aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
	if aErr != nil {
		return nil, aErr
	}
	bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
	if bErr != nil {
		return nil, bErr
	}

	delta = append(delta, DoMinusChunks(aData, bData)...)
	delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
	return
}
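
// DoMinusChunks returns the chunks in as whose file ids do not appear in bs.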
func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {

	fileIds := make(map[string]bool)
	for _, interval := range bs {
		fileIds[interval.GetFileIdString()] = true
	}
	for _, chunk := range as {
		if _, found := fileIds[chunk.GetFileIdString()]; !found {
			delta = append(delta, chunk)
		}
	}

	return
}
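
// DoMinusChunksBySourceFileId returns the chunks in as whose file id and source
// file id are both absent from the file ids and source file ids of bs.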
func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {

	fileIds := make(map[string]bool)
	for _, interval := range bs {
		fileIds[interval.GetFileIdString()] = true
		fileIds[interval.GetSourceFileId()] = true
	}
	for _, chunk := range as {
		_, sourceFileIdFound := fileIds[chunk.GetSourceFileId()]
		_, fileIdFound := fileIds[chunk.GetFileId()]
		if !sourceFileIdFound && !fileIdFound {
			delta = append(delta, chunk)
		}
	}

	return
}
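
// A ChunkView describes the slice of a single chunk that is visible in the
// file: bytes [Offset, Offset+Size) of the chunk, appearing at LogicOffset.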
type ChunkView struct {
	FileId      string
	Offset      int64
	Size        uint64
	LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize   uint64
	CipherKey   []byte
	IsGzipped   bool
}

func (cv *ChunkView) IsFullChunk() bool {
	return cv.Size == cv.ChunkSize
}
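
// ViewFromChunks resolves the chunks into non-overlapping visible intervals and
// returns the chunk views covering the file range [offset, offset+size).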
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {

	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)

	return ViewFromVisibleIntervals(visibles, offset, size)
}
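
// ViewFromVisibleIntervals clips each visible interval to [offset, offset+size)
// and converts the overlapping portion into a ChunkView.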
func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {

	stop := offset + size
	if size == math.MaxInt64 {
		stop = math.MaxInt64
	}
	if stop < offset {
		stop = math.MaxInt64
	}

	for _, chunk := range visibles {

		chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)

		if chunkStart < chunkStop {
			views = append(views, &ChunkView{
				FileId:      chunk.fileId,
				Offset:      chunkStart - chunk.start + chunk.chunkOffset,
				Size:        uint64(chunkStop - chunkStart),
				LogicOffset: chunkStart,
				ChunkSize:   chunk.chunkSize,
				CipherKey:   chunk.cipherKey,
				IsGzipped:   chunk.isGzipped,
			})
		}
	}

	return views
}

func logPrintf(name string, visibles []VisibleInterval) {
	/*
		glog.V(0).Infof("%s len %d", name, len(visibles))
		for _, v := range visibles {
			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
		}
	*/
}
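
// MergeIntoVisibles overlays a chunk onto the existing visible intervals:
// intervals overlapped by the chunk are trimmed or dropped, and the chunk's
// own interval is inserted in start-offset order.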
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {

	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

	length := len(visibles)
	if length == 0 {
		return append(visibles, newV)
	}
	last := visibles[length-1]
	if last.stop <= chunk.Offset {
		return append(visibles, newV)
	}

	logPrintf(" before", visibles)
	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
	chunkStop := chunk.Offset + int64(chunk.Size)
	for _, v := range visibles {
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if v.start < chunkStop && chunkStop < v.stop {
			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			newVisibles = append(newVisibles, v)
			// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
		}
	}
	newVisibles = append(newVisibles, newV)

	logPrintf(" append", newVisibles)

	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}

	logPrintf(" sorted", newVisibles)

	return newVisibles
}

// NonOverlappingVisibleIntervals translates the file chunks into in-memory VisibleIntervals.
// If a file chunk's content is a chunk manifest, it is first resolved into the chunks it references.
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {

	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
	if err != nil {
		return
	}

	visibles2 := readResolvedChunks(chunks)

	// the readResolvedChunks result is always returned here; the older
	// merge-based path below is kept only for cross-checking and is short-circuited.
	if true {
		return visibles2, err
	}

	slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
		if a.Mtime == b.Mtime {
			filer_pb.EnsureFid(a)
			filer_pb.EnsureFid(b)
			if a.Fid == nil || b.Fid == nil {
				return true
			}
			return a.Fid.FileKey < b.Fid.FileKey
		}
		return a.Mtime < b.Mtime
	})

	for _, chunk := range chunks {

		// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
		visibles = MergeIntoVisibles(visibles, chunk)

		logPrintf("add", visibles)

	}

	if len(visibles) != len(visibles2) {
		fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2))
	} else {
		for i := 0; i < len(visibles); i++ {
			checkDifference(visibles[i], visibles2[i])
		}
	}

	return
}

func checkDifference(x, y VisibleInterval) {
	if x.start != y.start ||
		x.stop != y.stop ||
		x.fileId != y.fileId ||
		x.modifiedTime != y.modifiedTime {
		fmt.Printf("different visible %+v : %+v\n", x, y)
	}
}

// VisibleInterval is a non-overlapping byte range of the file that is still
// visible after later writes; each visible interval maps to one file chunk.
type VisibleInterval struct {
	start        int64
	stop         int64
	modifiedTime int64
	fileId       string
	chunkOffset  int64
	chunkSize    uint64
	cipherKey    []byte
	isGzipped    bool
}

func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
	return VisibleInterval{
		start:        start,
		stop:         stop,
		fileId:       fileId,
		modifiedTime: modifiedTime,
		chunkOffset:  chunkOffset, // the starting position in the chunk
		chunkSize:    chunkSize,
		cipherKey:    cipherKey,
		isGzipped:    isGzipped,
	}
}

func min(x, y int64) int64 {
	if x <= y {
		return x
	}
	return y
}

func max(x, y int64) int64 {
	if x <= y {
		return y
	}
	return x
}