package filer2

import (
	"log"
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
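
// TestCompactFileChunks verifies that CompactFileChunks splits the chunk list
// into chunks that are still at least partially visible ("abc", "def", and
// "jkl" below) and chunks that newer writes have fully covered ("ghi", which
// the later "jkl" write at Mtime 300 hides entirely).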
func TestCompactFileChunks(t *testing.T) {
	chunks := []*filer_pb.FileChunk{
		{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
		{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
		{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
		{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
	}

	compacted, garbage := CompactFileChunks(chunks)

	log.Printf("Compacted: %+v", compacted)
	log.Printf("Garbage:   %+v", garbage)

	if len(compacted) != 3 {
		t.Fatalf("unexpected compacted: %d", len(compacted))
	}
	if len(garbage) != 1 {
		t.Fatalf("unexpected garbage: %d", len(garbage))
	}
}
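
// TestIntervalMerging exercises nonOverlappingVisibleIntervals, which flattens
// a list of possibly overlapping chunks into a sorted sequence of
// non-overlapping visible intervals, keeping the chunk with the newest Mtime
// wherever writes overlap.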
func TestIntervalMerging(t *testing.T) {
	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Expected []*visibleInterval
	}{
		// case 0: normal
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
				{start: 100, stop: 200, fileId: "asdf"},
				{start: 200, stop: 300, fileId: "fsad"},
			},
		},
		// case 1: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
			},
		},
		// case 2: updates overwrite part of previous chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 100, fileId: "abc"},
			},
		},
		// case 3: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 300, fileId: "xxxx"},
			},
		},
		// case 4: updates far away from prev chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 250, stop: 500, fileId: "xxxx"},
			},
		},
		// case 5: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 200, stop: 220, fileId: "abc"},
			},
		},
		// case 6: same updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
			},
		},
		// case 7: real updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
				{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
				{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
				{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
				{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
				{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
				{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
				{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
				{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
				{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
		intervals := nonOverlappingVisibleIntervals(testcase.Chunks)
		for x, interval := range intervals {
			log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
				i, x, interval.start, interval.stop, interval.fileId)
		}
		for x, interval := range intervals {
			if interval.start != testcase.Expected[x].start {
				t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
					i, x, interval.start, testcase.Expected[x].start)
			}
			if interval.stop != testcase.Expected[x].stop {
				t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
					i, x, interval.stop, testcase.Expected[x].stop)
			}
			if interval.fileId != testcase.Expected[x].fileId {
				t.Fatalf("failed on test case %d, interval %d, fileId %s, expect %s",
					i, x, interval.fileId, testcase.Expected[x].fileId)
			}
		}
		if len(intervals) != len(testcase.Expected) {
			t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
		}
	}
}
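
// TestChunksReading exercises ViewFromChunks, which resolves a logical read of
// [Offset, Offset+Size) against the visible intervals and returns the chunk
// views to fetch: each view names a FileId, the byte range within that chunk
// (Offset, Size), and the logical file offset (LogicOffset) the bytes map to.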
func TestChunksReading(t *testing.T) {
	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Offset   int64
		Size     int
		Expected []*ChunkView
	}{
		// case 0: normal
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   250,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
				{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
			},
		},
		// case 1: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Offset: 50,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
			},
		},
		// case 2: updates overwrite part of previous chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Offset: 25,
			Size:   50,
			Expected: []*ChunkView{
				{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
				{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
			},
		},
		// case 3: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
			},
		},
		// case 4: updates far away from prev chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   400,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
			},
		},
		// case 5: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Offset: 0,
			Size:   220,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
			},
		},
		// case 6: same updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Offset: 0,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
			},
		},
		// case 7: edge cases
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
		chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
		for x, chunk := range chunks {
			log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
				i, x, chunk.Offset, chunk.Size, chunk.FileId)
			if chunk.Offset != testcase.Expected[x].Offset {
				t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
					i, x, chunk.Offset, testcase.Expected[x].Offset)
			}
			if chunk.Size != testcase.Expected[x].Size {
				t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
					i, x, chunk.Size, testcase.Expected[x].Size)
			}
			if chunk.FileId != testcase.Expected[x].FileId {
				t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
					i, x, chunk.FileId, testcase.Expected[x].FileId)
			}
			if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
				t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
					i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
			}
		}
		if len(chunks) != len(testcase.Expected) {
			t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
		}
	}
}