package filer2

import (
	"log"
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
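// TestCompactFileChunks checks that overlapping chunks are split into the
// still-visible ones and the fully shadowed ("garbage") ones: the newest
// chunk "jkl" (Mtime 300, range [110, 310)) completely covers "ghi", so one
// chunk becomes garbage while "abc", "def", and "jkl" remain visible.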
func TestCompactFileChunks(t *testing.T) {
	chunks := []*filer_pb.FileChunk{
		{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
		{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
		{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
		{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
	}

	compacted, garbage := CompactFileChunks(chunks)

	log.Printf("Compacted: %+v", compacted)
	log.Printf("Garbage  : %+v", garbage)

	if len(compacted) != 3 {
		t.Fatalf("unexpected compacted: %d", len(compacted))
	}
	if len(garbage) != 1 {
		t.Fatalf("unexpected garbage: %d", len(garbage))
	}
}
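// TestIntervalMerging verifies that nonOverlappingVisibleIntervals resolves
// overlaps by Mtime: a newer chunk shadows the overlapped portion of any
// older chunk, yielding a sorted list of non-overlapping visible intervals.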
func TestIntervalMerging(t *testing.T) {

	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Expected []*visibleInterval
	}{
		// case 0: normal
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
				{start: 100, stop: 200, fileId: "asdf"},
				{start: 200, stop: 300, fileId: "fsad"},
			},
		},
		// case 1: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
			},
		},
		// case 2: updates overwrite part of previous chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 100, fileId: "abc"},
			},
		},
		// case 3: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 300, fileId: "xxxx"},
			},
		},
		// case 4: updates far away from prev chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 250, stop: 500, fileId: "xxxx"},
			},
		},
		// case 5: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 200, stop: 220, fileId: "abc"},
			},
		},
		// case 6: same updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
			},
		},
		// case 7: real updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
				{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
				{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
				{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
				{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
				{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
			},
			Expected: []*visibleInterval{
				{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
				{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
				{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
				{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
				{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
		intervals := nonOverlappingVisibleIntervals(testcase.Chunks)
		for x, interval := range intervals {
			log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
				i, x, interval.start, interval.stop, interval.fileId)
		}
		// check the count first so a mismatch fails cleanly instead of indexing out of range
		if len(intervals) != len(testcase.Expected) {
			t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
		}
		for x, interval := range intervals {
			if interval.start != testcase.Expected[x].start {
				t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
					i, x, interval.start, testcase.Expected[x].start)
			}
			if interval.stop != testcase.Expected[x].stop {
				t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
					i, x, interval.stop, testcase.Expected[x].stop)
			}
			if interval.fileId != testcase.Expected[x].fileId {
				t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
					i, x, interval.fileId, testcase.Expected[x].fileId)
			}
		}

		cleanupIntervals(intervals)
	}
}
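// TestChunksReading verifies that ViewFromChunks maps a logical read range
// [Offset, Offset+Size) onto ChunkViews. In each expected view, LogicOffset
// is the position in the logical file; judging by the expected values, Offset
// is relative to the start of that chunk's visible interval (see case 2,
// where the read enters "abc" exactly at the start of its visible remainder,
// file position 50, giving Offset 0).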
func TestChunksReading(t *testing.T) {

	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Offset   int64
		Size     int
		Expected []*ChunkView
	}{
		// case 0: normal
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   250,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
				{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
			},
		},
		// case 1: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Offset: 50,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
			},
		},
		// case 2: updates overwrite part of previous chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Offset: 25,
			Size:   50,
			Expected: []*ChunkView{
				{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
				{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
			},
		},
		// case 3: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
			},
		},
		// case 4: updates far away from prev chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   400,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				// {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
			},
		},
		// case 5: updates overwrite full chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Offset: 0,
			Size:   220,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
			},
		},
		// case 6: same updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Offset: 0,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
			},
		},
		// case 7: edge cases
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
		chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
		// check the count first so a mismatch fails cleanly instead of indexing out of range
		if len(chunks) != len(testcase.Expected) {
			t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
		}
		for x, chunk := range chunks {
			log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
				i, x, chunk.Offset, chunk.Size, chunk.FileId)
			if chunk.Offset != testcase.Expected[x].Offset {
				t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
					i, x, chunk.Offset, testcase.Expected[x].Offset)
			}
			if chunk.Size != testcase.Expected[x].Size {
				t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
					i, x, chunk.Size, testcase.Expected[x].Size)
			}
			if chunk.FileId != testcase.Expected[x].FileId {
				t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
					i, x, chunk.FileId, testcase.Expected[x].FileId)
			}
			if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
				t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
					i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
			}
		}
	}
}