package page_writer

import (
	"github.com/seaweedfs/seaweedfs/weed/util"
	"testing"
)
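
// TestUploadPipeline writes 4-byte offset markers across several contiguous
// ranges via SaveDataAt, then reads them back via MaybeReadDataAt to confirm
// that each marker round-trips through the pipeline.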
func TestUploadPipeline(t *testing.T) {

	uploadPipeline := NewUploadPipeline(nil, 2*1024*1024, nil, 16, "")

	writeRange(uploadPipeline, 0, 131072)
	writeRange(uploadPipeline, 131072, 262144)
	writeRange(uploadPipeline, 262144, 1025536)

	confirmRange(t, uploadPipeline, 0, 1025536)

	writeRange(uploadPipeline, 1025536, 1296896)

	confirmRange(t, uploadPipeline, 1025536, 1296896)

	writeRange(uploadPipeline, 1296896, 2162688)

	confirmRange(t, uploadPipeline, 1296896, 2162688)

	confirmRange(t, uploadPipeline, 1296896, 2162688)
}

// writeRange fills the scaled-down range [startOff/4, stopOff/4) with 4-byte
// markers, writing at each offset the offset value itself.
// startOff and stopOff must be multiples of 4.
func writeRange(uploadPipeline *UploadPipeline, startOff, stopOff int64) {
	p := make([]byte, 4)
	for i := startOff / 4; i < stopOff/4; i += 4 {
		util.Uint32toBytes(p, uint32(i))
		uploadPipeline.SaveDataAt(p, i, false, 0)
	}
}
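
// confirmRange reads back the same scaled-down range written by writeRange and
// checks that every 4-byte marker still equals its own offset.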
func confirmRange(t *testing.T, uploadPipeline *UploadPipeline, startOff, stopOff int64) {
	p := make([]byte, 4)
	for i := startOff / 4; i < stopOff/4; i += 4 {
		uploadPipeline.MaybeReadDataAt(p, i, 0)
		x := util.BytesToUint32(p)
		if x != uint32(i) {
			t.Errorf("expecting %d found %d at offset [%d,%d)", i, x, i, i+4)
		}
	}
}