
refactor filer proto chunk variable from mtime to modified_ts_ns

pull/3924/head
chrislu 2 years ago
commit ea2637734a
29 changed files (lines changed per file in parentheses):

  1. other/java/client/pom.xml (2)
  2. other/java/client/pom.xml.deploy (2)
  3. other/java/client/pom_debug.xml (2)
  4. other/java/client/src/main/java/seaweedfs/client/ReadChunks.java (4)
  5. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (2)
  6. other/java/client/src/main/proto/filer.proto (2)
  7. other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java (16)
  8. other/java/examples/pom.xml (4)
  9. other/java/hdfs2/dependency-reduced-pom.xml (2)
  10. other/java/hdfs2/pom.xml (2)
  11. other/java/hdfs3/dependency-reduced-pom.xml (2)
  12. other/java/hdfs3/pom.xml (2)
  13. weed/filer/filechunks.go (16)
  14. weed/filer/filechunks2_test.go (36)
  15. weed/filer/filechunks_read.go (6)
  16. weed/filer/filechunks_read_test.go (168)
  17. weed/filer/filechunks_test.go (164)
  18. weed/filer/filer_notify_test.go (2)
  19. weed/mount/dirty_pages_chunked.go (2)
  20. weed/mount/filehandle.go (4)
  21. weed/operation/upload_content.go (2)
  22. weed/pb/filer.proto (2)
  23. weed/pb/filer_pb/filer.pb.go (31)
  24. weed/pb/volume_server_pb/volume_server.pb.go (4)
  25. weed/pb/volume_server_pb/volume_server_grpc.pb.go (8)
  26. weed/replication/sink/filersink/fetch_write.go (2)
  27. weed/s3api/filer_multipart.go (12)
  28. weed/server/filer_grpc_server_remote.go (8)
  29. weed/shell/command_volume_fsck.go (2)
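
This commit renames the FileChunk timestamp field from mtime to modified_ts_ns while keeping field number 4, so the rename is wire-compatible: entries serialized before the change still decode, and only the generated accessor names change (Go's Mtime becomes ModifiedTsNs; Java's getMtime() becomes getModifiedTsNs()). A minimal sketch of building a chunk against the regenerated Go package; the import path reflects the module name at the time, and the file id is a made-up value for illustration:

```go
package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// ModifiedTsNs carries nanoseconds, matching the time.Now().UnixNano()
	// stamps used by the Go writers touched in this commit.
	chunk := &filer_pb.FileChunk{
		FileId:       "3,01637037d6", // hypothetical fid, for illustration only
		Offset:       0,
		Size:         1024,
		ModifiedTsNs: time.Now().UnixNano(),
	}
	fmt.Println(chunk.GetModifiedTsNs())
}
```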

other/java/client/pom.xml

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>3.30</version>
+<version>3.33</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom.xml.deploy

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>3.30</version>
+<version>3.33</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom_debug.xml

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>3.30</version>
+<version>3.33</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/src/main/java/seaweedfs/client/ReadChunks.java

@@ -83,7 +83,7 @@ public class ReadChunks {
 prevX,
 point.x,
 chunk.getFileId(),
-chunk.getMtime(),
+chunk.getModifiedTsNs(),
 prevX - chunk.getOffset(),
 chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x,
 chunk.getCipherKey().toByteArray(),
@@ -100,7 +100,7 @@ public class ReadChunks {
 public Point(long x, FilerProto.FileChunk chunk, boolean isStart) {
 this.x = x;
-this.ts = chunk.getMtime();
+this.ts = chunk.getModifiedTsNs();
 this.chunk = chunk;
 this.isStart = isStart;
 }

other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java

@@ -98,7 +98,7 @@ public class SeaweedWrite {
 .setFileId(fileId)
 .setOffset(offset)
 .setSize(bytesLength)
-.setMtime(System.currentTimeMillis() / 10000L)
+.setModifiedTsNs(System.nanoTime())
 .setETag(etag)
 .setCipherKey(cipherKeyString);
 }

other/java/client/src/main/proto/filer.proto

@@ -132,7 +132,7 @@ message FileChunk {
 string file_id = 1; // to be deprecated
 int64 offset = 2;
 uint64 size = 3;
-int64 mtime = 4;
+int64 modified_ts_ns = 4;
 string e_tag = 5;
 string source_file_id = 6; // to be deprecated
 FileId fid = 7;

other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java

@@ -17,13 +17,13 @@ public class SeaweedReadTest {
 .setFileId("aaa")
 .setOffset(0)
 .setSize(100)
-.setMtime(1000)
+.setModifiedTsNs(1000)
 .build());
 chunks.add(FilerProto.FileChunk.newBuilder()
 .setFileId("bbb")
 .setOffset(100)
 .setSize(133)
-.setMtime(2000)
+.setModifiedTsNs(2000)
 .build());
 List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks);
@@ -70,31 +70,31 @@ public class SeaweedReadTest {
 .setFileId("a")
 .setOffset(0)
 .setSize(100)
-.setMtime(1)
+.setModifiedTsNs(1)
 .build());
 chunks.add(FilerProto.FileChunk.newBuilder()
 .setFileId("b")
 .setOffset(50)
 .setSize(100)
-.setMtime(2)
+.setModifiedTsNs(2)
 .build());
 chunks.add(FilerProto.FileChunk.newBuilder()
 .setFileId("c")
 .setOffset(200)
 .setSize(50)
-.setMtime(3)
+.setModifiedTsNs(3)
 .build());
 chunks.add(FilerProto.FileChunk.newBuilder()
 .setFileId("d")
 .setOffset(250)
 .setSize(50)
-.setMtime(4)
+.setModifiedTsNs(4)
 .build());
 chunks.add(FilerProto.FileChunk.newBuilder()
 .setFileId("e")
 .setOffset(175)
 .setSize(100)
-.setMtime(5)
+.setModifiedTsNs(5)
 .build());
 List<SeaweedRead.VisibleInterval> visibleIntervals = ReadChunks.readResolvedChunks(chunks);
@@ -161,7 +161,7 @@ public class SeaweedReadTest {
 .setFileId("")
 .setOffset(start)
 .setSize(size)
-.setMtime(ts)
+.setModifiedTsNs(ts)
 .build();
 }
 }

other/java/examples/pom.xml

@@ -11,13 +11,13 @@
 <dependency>
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>3.30</version>
+<version>3.33</version>
 <scope>compile</scope>
 </dependency>
 <dependency>
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-hadoop2-client</artifactId>
-<version>3.30</version>
+<version>3.33</version>
 <scope>compile</scope>
 </dependency>
 <dependency>

other/java/hdfs2/dependency-reduced-pom.xml

@@ -317,7 +317,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>3.30</seaweedfs.client.version>
+<seaweedfs.client.version>3.33</seaweedfs.client.version>
 <hadoop.version>3.2.4</hadoop.version>
 </properties>
 </project>

other/java/hdfs2/pom.xml

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>3.30</seaweedfs.client.version>
+<seaweedfs.client.version>3.33</seaweedfs.client.version>
 <hadoop.version>3.2.4</hadoop.version>
 </properties>

other/java/hdfs3/dependency-reduced-pom.xml

@@ -317,7 +317,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>3.30</seaweedfs.client.version>
+<seaweedfs.client.version>3.33</seaweedfs.client.version>
 <hadoop.version>3.2.4</hadoop.version>
 </properties>
 </project>

other/java/hdfs3/pom.xml

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>3.30</seaweedfs.client.version>
+<seaweedfs.client.version>3.33</seaweedfs.client.version>
 <hadoop.version>3.2.4</hadoop.version>
 </properties>

weed/filer/filechunks.go

@@ -192,7 +192,7 @@ func logPrintf(name string, visibles []VisibleInterval) {
 func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
-newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
+newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.ModifiedTsNs, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
 length := len(visibles)
 if length == 0 {
@@ -208,12 +208,12 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
 chunkStop := chunk.Offset + int64(chunk.Size)
 for _, v := range visibles {
 if v.start < chunk.Offset && chunk.Offset < v.stop {
-t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
+t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTsNs, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
 newVisibles = append(newVisibles, t)
 // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 }
 if v.start < chunkStop && chunkStop < v.stop {
-t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
+t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTsNs, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
 newVisibles = append(newVisibles, t)
 // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
 }
@@ -254,7 +254,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction
 return visibles2, err
 }
 slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
-if a.Mtime == b.Mtime {
+if a.ModifiedTsNs == b.ModifiedTsNs {
 filer_pb.EnsureFid(a)
 filer_pb.EnsureFid(b)
 if a.Fid == nil || b.Fid == nil {
@@ -262,7 +262,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction
 }
 return a.Fid.FileKey < b.Fid.FileKey
 }
-return a.Mtime < b.Mtime
+return a.ModifiedTsNs < b.ModifiedTsNs
 })
 for _, chunk := range chunks {
@@ -288,7 +288,7 @@ func checkDifference(x, y VisibleInterval) {
 if x.start != y.start ||
 x.stop != y.stop ||
 x.fileId != y.fileId ||
-x.modifiedTime != y.modifiedTime {
+x.modifiedTsNs != y.modifiedTsNs {
 fmt.Printf("different visible %+v : %+v\n", x, y)
 }
 }
@@ -299,7 +299,7 @@ func checkDifference(x, y VisibleInterval) {
 type VisibleInterval struct {
 start int64
 stop int64
-modifiedTime int64
+modifiedTsNs int64
 fileId string
 chunkOffset int64
 chunkSize uint64
@@ -312,7 +312,7 @@ func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, ch
 start: start,
 stop: stop,
 fileId: fileId,
-modifiedTime: modifiedTime,
+modifiedTsNs: modifiedTime,
 chunkOffset: chunkOffset, // the starting position in the chunk
 chunkSize: chunkSize,
 cipherKey: cipherKey,
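
The sort in NonOverlappingVisibleIntervals above is what decides which overlapping write wins: chunks are ordered by ModifiedTsNs, with Fid.FileKey as the tie-breaker when two chunks share a timestamp, and later chunks in that order overwrite earlier intervals. A self-contained sketch of the same ordering, using plain structs and made-up values in place of the generated filer_pb types:

```go
package main

import (
	"fmt"
	"sort"
)

type chunk struct {
	ModifiedTsNs int64  // nanosecond timestamp, the primary sort key
	FileKey      uint64 // tie-breaker when two chunks share a timestamp
}

func main() {
	chunks := []chunk{
		{ModifiedTsNs: 200, FileKey: 7},
		{ModifiedTsNs: 100, FileKey: 9},
		{ModifiedTsNs: 200, FileKey: 3}, // same ts as the first: FileKey decides
	}
	sort.Slice(chunks, func(i, j int) bool {
		if chunks[i].ModifiedTsNs == chunks[j].ModifiedTsNs {
			return chunks[i].FileKey < chunks[j].FileKey
		}
		return chunks[i].ModifiedTsNs < chunks[j].ModifiedTsNs
	})
	// Oldest first; a later (newer) chunk overwrites the byte ranges it covers.
	fmt.Println(chunks) // [{100 9} {200 3} {200 7}]
}
```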

weed/filer/filechunks2_test.go

@@ -17,14 +17,14 @@ func TestDoMinusChunks(t *testing.T) {
 // clusterA append a new line and then clusterB also append a new line
 // clusterA append a new line again
 chunksInA := []*filer_pb.FileChunk{
-{Offset: 0, Size: 3, FileId: "11", Mtime: 100},
-{Offset: 3, Size: 3, FileId: "22", SourceFileId: "2", Mtime: 200},
-{Offset: 6, Size: 3, FileId: "33", Mtime: 300},
+{Offset: 0, Size: 3, FileId: "11", ModifiedTsNs: 100},
+{Offset: 3, Size: 3, FileId: "22", SourceFileId: "2", ModifiedTsNs: 200},
+{Offset: 6, Size: 3, FileId: "33", ModifiedTsNs: 300},
 }
 chunksInB := []*filer_pb.FileChunk{
-{Offset: 0, Size: 3, FileId: "1", SourceFileId: "11", Mtime: 100},
-{Offset: 3, Size: 3, FileId: "2", Mtime: 200},
-{Offset: 6, Size: 3, FileId: "3", SourceFileId: "33", Mtime: 300},
+{Offset: 0, Size: 3, FileId: "1", SourceFileId: "11", ModifiedTsNs: 100},
+{Offset: 3, Size: 3, FileId: "2", ModifiedTsNs: 200},
+{Offset: 6, Size: 3, FileId: "3", SourceFileId: "33", ModifiedTsNs: 300},
 }
 // clusterB using command "echo 'content' > hello.txt" to overwrite file
@@ -50,17 +50,17 @@ func TestDoMinusChunks(t *testing.T) {
 func TestCompactFileChunksRealCase(t *testing.T) {
 chunks := []*filer_pb.FileChunk{
-{FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497},
-{FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492},
-{FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928},
-{FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894},
-{FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900},
-{FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904},
-{FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910},
-{FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903},
-{FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911},
-{FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909},
-{FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922},
+{FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, ModifiedTsNs: 5320497},
+{FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, ModifiedTsNs: 5320492},
+{FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, ModifiedTsNs: 5325928},
+{FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, ModifiedTsNs: 5325894},
+{FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, ModifiedTsNs: 5325900},
+{FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, ModifiedTsNs: 5325904},
+{FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, ModifiedTsNs: 5325910},
+{FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, ModifiedTsNs: 5325903},
+{FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, ModifiedTsNs: 5325911},
+{FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, ModifiedTsNs: 5325909},
+{FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, ModifiedTsNs: 5325922},
 }
 printChunks("before", chunks)
@@ -75,7 +75,7 @@ func TestCompactFileChunksRealCase(t *testing.T) {
 func printChunks(name string, chunks []*filer_pb.FileChunk) {
 slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
 if a.Offset == b.Offset {
-return a.Mtime < b.Mtime
+return a.ModifiedTsNs < b.ModifiedTsNs
 }
 return a.Offset < b.Offset
 })

weed/filer/filechunks_read.go

@@ -11,13 +11,13 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva
 for _, chunk := range chunks {
 points = append(points, &Point{
 x: chunk.Offset,
-ts: chunk.Mtime,
+ts: chunk.ModifiedTsNs,
 chunk: chunk,
 isStart: true,
 })
 points = append(points, &Point{
 x: chunk.Offset + int64(chunk.Size),
-ts: chunk.Mtime,
+ts: chunk.ModifiedTsNs,
 chunk: chunk,
 isStart: false,
 })
@@ -98,7 +98,7 @@ func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, p
 start: prevX,
 stop: point.x,
 fileId: chunk.GetFileIdString(),
-modifiedTime: chunk.Mtime,
+modifiedTsNs: chunk.ModifiedTsNs,
 chunkOffset: prevX - chunk.Offset,
 chunkSize: chunk.Size,
 cipherKey: chunk.CipherKey,
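
readResolvedChunks sweeps these start/stop points so that, at every byte offset, the chunk with the largest ModifiedTsNs is the visible one. A brute-force sketch of that rule with plain structs and made-up values, mirroring the per-byte check in TestRandomizedReadResolvedChunks in the next file:

```go
package main

import "fmt"

type chunk struct {
	Offset       int64
	Size         int64
	ModifiedTsNs int64
}

func main() {
	const fileSize = 300
	chunks := []chunk{
		{Offset: 0, Size: 100, ModifiedTsNs: 1},
		{Offset: 50, Size: 100, ModifiedTsNs: 2}, // a newer write overlapping the first
	}
	// For each byte, the chunk with the largest ModifiedTsNs wins.
	visible := make([]int64, fileSize)
	for _, c := range chunks {
		for i := c.Offset; i < c.Offset+c.Size && i < fileSize; i++ {
			if c.ModifiedTsNs > visible[i] {
				visible[i] = c.ModifiedTsNs
			}
		}
	}
	fmt.Println(visible[0], visible[49], visible[50], visible[149]) // 1 1 2 2
}
```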

weed/filer/filechunks_read_test.go

@@ -11,41 +11,41 @@ func TestReadResolvedChunks(t *testing.T) {
 chunks := []*filer_pb.FileChunk{
 {
-FileId: "a",
-Offset: 0,
-Size:   100,
-Mtime:  1,
+FileId:       "a",
+Offset:       0,
+Size:         100,
+ModifiedTsNs: 1,
 },
 {
-FileId: "b",
-Offset: 50,
-Size:   100,
-Mtime:  2,
+FileId:       "b",
+Offset:       50,
+Size:         100,
+ModifiedTsNs: 2,
 },
 {
-FileId: "c",
-Offset: 200,
-Size:   50,
-Mtime:  3,
+FileId:       "c",
+Offset:       200,
+Size:         50,
+ModifiedTsNs: 3,
 },
 {
-FileId: "d",
-Offset: 250,
-Size:   50,
-Mtime:  4,
+FileId:       "d",
+Offset:       250,
+Size:         50,
+ModifiedTsNs: 4,
 },
 {
-FileId: "e",
-Offset: 175,
-Size:   100,
-Mtime:  5,
+FileId:       "e",
+Offset:       175,
+Size:         100,
+ModifiedTsNs: 5,
 },
 }
 visibles := readResolvedChunks(chunks)
 for _, visible := range visibles {
-fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
+fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
 }
 }
@@ -76,8 +76,8 @@ func TestRandomizedReadResolvedChunks(t *testing.T) {
 for _, visible := range visibles {
 for i := visible.start; i < visible.stop; i++ {
-if array[i] != visible.modifiedTime {
-t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
+if array[i] != visible.modifiedTsNs {
+t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs)
 }
 }
 }
@@ -92,10 +92,10 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil
 }
 // fmt.Printf("write [%d,%d) %d\n", start, start+size, ts)
 return &filer_pb.FileChunk{
-FileId: "",
-Offset: start,
-Size:   uint64(size),
-Mtime:  ts,
+FileId:       "",
+Offset:       start,
+Size:         uint64(size),
+ModifiedTsNs: ts,
 }
 }
@@ -105,10 +105,10 @@ func TestSequentialReadResolvedChunks(t *testing.T) {
 var chunks []*filer_pb.FileChunk
 for ts := int64(0); ts < 13; ts++ {
 chunks = append(chunks, &filer_pb.FileChunk{
-FileId: "",
-Offset: chunkSize * ts,
-Size:   uint64(chunkSize),
-Mtime:  1,
+FileId:       "",
+Offset:       chunkSize * ts,
+Size:         uint64(chunkSize),
+ModifiedTsNs: 1,
 })
 }
@@ -122,89 +122,89 @@ func TestActualReadResolvedChunks(t *testing.T) {
 chunks := []*filer_pb.FileChunk{
 {
-FileId: "5,e7b96fef48",
-Offset: 0,
-Size:   2097152,
-Mtime:  1634447487595823000,
+FileId:       "5,e7b96fef48",
+Offset:       0,
+Size:         2097152,
+ModifiedTsNs: 1634447487595823000,
 },
 {
-FileId: "5,e5562640b9",
-Offset: 2097152,
-Size:   2097152,
-Mtime:  1634447487595826000,
+FileId:       "5,e5562640b9",
+Offset:       2097152,
+Size:         2097152,
+ModifiedTsNs: 1634447487595826000,
 },
 {
-FileId: "5,df033e0fe4",
-Offset: 4194304,
-Size:   2097152,
-Mtime:  1634447487595827000,
+FileId:       "5,df033e0fe4",
+Offset:       4194304,
+Size:         2097152,
+ModifiedTsNs: 1634447487595827000,
 },
 {
-FileId: "7,eb08148a9b",
-Offset: 6291456,
-Size:   2097152,
-Mtime:  1634447487595827000,
+FileId:       "7,eb08148a9b",
+Offset:       6291456,
+Size:         2097152,
+ModifiedTsNs: 1634447487595827000,
 },
 {
-FileId: "7,e0f92d1604",
-Offset: 8388608,
-Size:   2097152,
-Mtime:  1634447487595828000,
+FileId:       "7,e0f92d1604",
+Offset:       8388608,
+Size:         2097152,
+ModifiedTsNs: 1634447487595828000,
 },
 {
-FileId: "7,e33cb63262",
-Offset: 10485760,
-Size:   2097152,
-Mtime:  1634447487595828000,
+FileId:       "7,e33cb63262",
+Offset:       10485760,
+Size:         2097152,
+ModifiedTsNs: 1634447487595828000,
 },
 {
-FileId: "5,ea98e40e93",
-Offset: 12582912,
-Size:   2097152,
-Mtime:  1634447487595829000,
+FileId:       "5,ea98e40e93",
+Offset:       12582912,
+Size:         2097152,
+ModifiedTsNs: 1634447487595829000,
 },
 {
-FileId: "5,e165661172",
-Offset: 14680064,
-Size:   2097152,
-Mtime:  1634447487595829000,
+FileId:       "5,e165661172",
+Offset:       14680064,
+Size:         2097152,
+ModifiedTsNs: 1634447487595829000,
 },
 {
-FileId: "3,e692097486",
-Offset: 16777216,
-Size:   2097152,
-Mtime:  1634447487595830000,
+FileId:       "3,e692097486",
+Offset:       16777216,
+Size:         2097152,
+ModifiedTsNs: 1634447487595830000,
 },
 {
-FileId: "3,e28e2e3cbd",
-Offset: 18874368,
-Size:   2097152,
-Mtime:  1634447487595830000,
+FileId:       "3,e28e2e3cbd",
+Offset:       18874368,
+Size:         2097152,
+ModifiedTsNs: 1634447487595830000,
 },
 {
-FileId: "3,e443974d4e",
-Offset: 20971520,
-Size:   2097152,
-Mtime:  1634447487595830000,
+FileId:       "3,e443974d4e",
+Offset:       20971520,
+Size:         2097152,
+ModifiedTsNs: 1634447487595830000,
 },
 {
-FileId: "2,e815bed597",
-Offset: 23068672,
-Size:   2097152,
-Mtime:  1634447487595831000,
+FileId:       "2,e815bed597",
+Offset:       23068672,
+Size:         2097152,
+ModifiedTsNs: 1634447487595831000,
 },
 {
-FileId: "5,e94715199e",
-Offset: 25165824,
-Size:   1974736,
-Mtime:  1634447487595832000,
+FileId:       "5,e94715199e",
+Offset:       25165824,
+Size:         1974736,
+ModifiedTsNs: 1634447487595832000,
 },
 }
 visibles := readResolvedChunks(chunks)
 for _, visible := range visibles {
-fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
+fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs)
 }
 }

weed/filer/filechunks_test.go

@@ -15,10 +15,10 @@ import (
 func TestCompactFileChunks(t *testing.T) {
 chunks := []*filer_pb.FileChunk{
-{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
-{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
-{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
-{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
+{Offset: 10, Size: 100, FileId: "abc", ModifiedTsNs: 50},
+{Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
+{Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
+{Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300},
 }
 compacted, garbage := CompactFileChunks(nil, chunks)
@@ -35,22 +35,22 @@ func TestCompactFileChunks(t *testing.T) {
 func TestCompactFileChunks2(t *testing.T) {
 chunks := []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 50},
-{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
-{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
-{Offset: 0, Size: 100, FileId: "abcf", Mtime: 300},
-{Offset: 50, Size: 100, FileId: "fhfh", Mtime: 400},
-{Offset: 100, Size: 100, FileId: "yuyu", Mtime: 500},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50},
+{Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
+{Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
+{Offset: 0, Size: 100, FileId: "abcf", ModifiedTsNs: 300},
+{Offset: 50, Size: 100, FileId: "fhfh", ModifiedTsNs: 400},
+{Offset: 100, Size: 100, FileId: "yuyu", ModifiedTsNs: 500},
 }
 k := 3
 for n := 0; n < k; n++ {
 chunks = append(chunks, &filer_pb.FileChunk{
-Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
+Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
 })
 chunks = append(chunks, &filer_pb.FileChunk{
-Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
+Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
 })
 }
@@ -78,11 +78,11 @@ func TestRandomFileChunksCompact(t *testing.T) {
 stop = start + 16
 }
 chunk := &filer_pb.FileChunk{
-FileId: strconv.Itoa(i),
-Offset: int64(start),
-Size:   uint64(stop - start),
-Mtime:  int64(i),
-Fid:    &filer_pb.FileId{FileKey: uint64(i)},
+FileId:       strconv.Itoa(i),
+Offset:       int64(start),
+Size:         uint64(stop - start),
+ModifiedTsNs: int64(i),
+Fid:          &filer_pb.FileId{FileKey: uint64(i)},
 }
 chunks = append(chunks, chunk)
 for x := start; x < stop; x++ {
@@ -109,9 +109,9 @@ func TestIntervalMerging(t *testing.T) {
 // case 0: normal
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
-{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 100, fileId: "abc"},
@@ -122,8 +122,8 @@ func TestIntervalMerging(t *testing.T) {
 // case 1: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 200, fileId: "asdf"},
@@ -132,8 +132,8 @@ func TestIntervalMerging(t *testing.T) {
 // case 2: updates overwrite part of previous chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "a", Mtime: 123},
-{Offset: 0, Size: 70, FileId: "b", Mtime: 134},
+{Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
+{Offset: 0, Size: 70, FileId: "b", ModifiedTsNs: 134},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 70, fileId: "b"},
@@ -143,9 +143,9 @@ func TestIntervalMerging(t *testing.T) {
 // case 3: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
-{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 50, fileId: "asdf"},
@@ -155,9 +155,9 @@ func TestIntervalMerging(t *testing.T) {
 // case 4: updates far away from prev chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
-{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 200, fileId: "asdf"},
@@ -167,10 +167,10 @@ func TestIntervalMerging(t *testing.T) {
 // case 5: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "a", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "d", Mtime: 184},
-{Offset: 70, Size: 150, FileId: "c", Mtime: 143},
-{Offset: 80, Size: 100, FileId: "b", Mtime: 134},
+{Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "d", ModifiedTsNs: 184},
+{Offset: 70, Size: 150, FileId: "c", ModifiedTsNs: 143},
+{Offset: 80, Size: 100, FileId: "b", ModifiedTsNs: 134},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 200, fileId: "d"},
@@ -180,9 +180,9 @@ func TestIntervalMerging(t *testing.T) {
 // case 6: same updates
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
-{Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
-{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
+{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
+{Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123},
+{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 100, fileId: "xyz"},
@@ -191,12 +191,12 @@ func TestIntervalMerging(t *testing.T) {
 // case 7: real updates
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
-{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
-{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
-{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
-{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
-{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
+{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", ModifiedTsNs: 123},
+{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", ModifiedTsNs: 130},
+{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", ModifiedTsNs: 140},
+{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", ModifiedTsNs: 150},
+{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", ModifiedTsNs: 160},
+{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", ModifiedTsNs: 170},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 2097152, fileId: "3,029565bf3092"},
@@ -209,11 +209,11 @@ func TestIntervalMerging(t *testing.T) {
 // case 8: real bug
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 77824, FileId: "4,0b3df938e301", Mtime: 123},
-{Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", Mtime: 130},
-{Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", Mtime: 140},
-{Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", Mtime: 150},
-{Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", Mtime: 160},
+{Offset: 0, Size: 77824, FileId: "4,0b3df938e301", ModifiedTsNs: 123},
+{Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", ModifiedTsNs: 130},
+{Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", ModifiedTsNs: 140},
+{Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", ModifiedTsNs: 150},
+{Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", ModifiedTsNs: 160},
 },
 Expected: []*VisibleInterval{
 {start: 0, stop: 77824, fileId: "4,0b3df938e301"},
@@ -269,9 +269,9 @@ func TestChunksReading(t *testing.T) {
 // case 0: normal
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
-{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
 },
 Offset: 0,
 Size: 250,
@@ -284,8 +284,8 @@ func TestChunksReading(t *testing.T) {
 // case 1: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
 },
 Offset: 50,
 Size: 100,
@@ -296,8 +296,8 @@ func TestChunksReading(t *testing.T) {
 // case 2: updates overwrite part of previous chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 3, Size: 100, FileId: "a", Mtime: 123},
-{Offset: 10, Size: 50, FileId: "b", Mtime: 134},
+{Offset: 3, Size: 100, FileId: "a", ModifiedTsNs: 123},
+{Offset: 10, Size: 50, FileId: "b", ModifiedTsNs: 134},
 },
 Offset: 30,
 Size: 40,
@@ -309,9 +309,9 @@ func TestChunksReading(t *testing.T) {
 // case 3: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
-{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
 },
 Offset: 0,
 Size: 200,
@@ -323,9 +323,9 @@ func TestChunksReading(t *testing.T) {
 // case 4: updates far away from prev chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
-{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
 },
 Offset: 0,
 Size: 400,
@@ -337,10 +337,10 @@ func TestChunksReading(t *testing.T) {
 // case 5: updates overwrite full chunks
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "a", Mtime: 123},
-{Offset: 0, Size: 200, FileId: "c", Mtime: 184},
-{Offset: 70, Size: 150, FileId: "b", Mtime: 143},
-{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
+{Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
+{Offset: 0, Size: 200, FileId: "c", ModifiedTsNs: 184},
+{Offset: 70, Size: 150, FileId: "b", ModifiedTsNs: 143},
+{Offset: 80, Size: 100, FileId: "xxxx", ModifiedTsNs: 134},
 },
 Offset: 0,
 Size: 220,
@@ -352,9 +352,9 @@ func TestChunksReading(t *testing.T) {
 // case 6: same updates
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
-{Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
-{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
+{Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
+{Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123},
+{Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123},
 },
 Offset: 0,
 Size: 100,
@@ -365,9 +365,9 @@ func TestChunksReading(t *testing.T) {
 // case 7: edge cases
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
-{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
 },
 Offset: 0,
 Size: 200,
@@ -379,9 +379,9 @@ func TestChunksReading(t *testing.T) {
 // case 8: edge cases
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-{Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
-{Offset: 190, Size: 300, FileId: "fsad", Mtime: 353},
+{Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
+{Offset: 90, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
+{Offset: 190, Size: 300, FileId: "fsad", ModifiedTsNs: 353},
 },
 Offset: 0,
 Size: 300,
@@ -394,12 +394,12 @@ func TestChunksReading(t *testing.T) {
 // case 9: edge cases
 {
 Chunks: []*filer_pb.FileChunk{
-{Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1},
-{Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2},
-{Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3},
-{Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4},
-{Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5},
-{Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6},
+{Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", ModifiedTsNs: 1},
+{Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", ModifiedTsNs: 2},
+{Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", ModifiedTsNs: 3},
+{Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", ModifiedTsNs: 4},
+{Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", ModifiedTsNs: 5},
+{Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", ModifiedTsNs: 6},
 },
 Offset: 0,
 Size: 153578836,
@@ -455,10 +455,10 @@ func BenchmarkCompactFileChunks(b *testing.B) {
 for n := 0; n < k; n++ {
 chunks = append(chunks, &filer_pb.FileChunk{
-Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
+Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
 })
 chunks = append(chunks, &filer_pb.FileChunk{
-Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
+Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
 })
 }

weed/filer/filer_notify_test.go

@@ -26,7 +26,7 @@ func TestProtoMarshal(t *testing.T) {
 FileId: "234,2423423422",
 Offset: 234234,
 Size: 234,
-Mtime: 12312423,
+ModifiedTsNs: 12312423,
 ETag: "2342342354",
 SourceFileId: "23234,2342342342",
 },

weed/mount/dirty_pages_chunked.go

@@ -78,7 +78,7 @@ func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reade
 pages.lastErr = err
 return
 }
-chunk.Mtime = mtime
+chunk.ModifiedTsNs = mtime
 pages.fh.AddChunks([]*filer_pb.FileChunk{chunk})
 pages.fh.entryViewCache = nil
 glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size)

weed/mount/filehandle.go

@@ -124,8 +124,8 @@ func (fh *FileHandle) Release() {
 }
 func lessThan(a, b *filer_pb.FileChunk) bool {
-if a.Mtime == b.Mtime {
+if a.ModifiedTsNs == b.ModifiedTsNs {
 return a.Fid.FileKey < b.Fid.FileKey
 }
-return a.Mtime < b.Mtime
+return a.ModifiedTsNs < b.ModifiedTsNs
 }

weed/operation/upload_content.go

@@ -51,7 +51,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *fi
 FileId: fileId,
 Offset: offset,
 Size: uint64(uploadResult.Size),
-Mtime: time.Now().UnixNano(),
+ModifiedTsNs: time.Now().UnixNano(),
 ETag: uploadResult.ContentMd5,
 CipherKey: uploadResult.CipherKey,
 IsCompressed: uploadResult.Gzip > 0,

weed/pb/filer.proto

@@ -132,7 +132,7 @@ message FileChunk {
 string file_id = 1; // to be deprecated
 int64 offset = 2;
 uint64 size = 3;
-int64 mtime = 4;
+int64 modified_ts_ns = 4;
 string e_tag = 5;
 string source_file_id = 6; // to be deprecated
 FileId fid = 7;

weed/pb/filer_pb/filer.pb.go

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.28.1
-// protoc v3.21.4
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
 // source: filer.proto
 package filer_pb
@@ -596,7 +596,7 @@ type FileChunk struct {
 FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated
 Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
 Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
-Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"`
+ModifiedTsNs int64 `protobuf:"varint,4,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"`
 ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"`
 SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated
 Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"`
@@ -659,9 +659,9 @@ func (x *FileChunk) GetSize() uint64 {
 return 0
 }
-func (x *FileChunk) GetMtime() int64 {
+func (x *FileChunk) GetModifiedTsNs() int64 {
 if x != nil {
-return x.Mtime
+return x.ModifiedTsNs
 }
 return 0
 }
@@ -3239,9 +3239,9 @@ func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource {
 return nil
 }
-// ///////////////////////
+/////////////////////////
 // Key-Value operations
-// ///////////////////////
+/////////////////////////
 type KvGetRequest struct {
 state protoimpl.MessageState
 sizeCache protoimpl.SizeCache
@@ -3446,9 +3446,9 @@ func (x *KvPutResponse) GetError() string {
 return ""
 }
-// ///////////////////////
+/////////////////////////
 // path-based configurations
-// ///////////////////////
+/////////////////////////
 type FilerConf struct {
 state protoimpl.MessageState
 sizeCache protoimpl.SizeCache
@@ -3504,9 +3504,9 @@ func (x *FilerConf) GetLocations() []*FilerConf_PathConf {
 return nil
 }
-// ///////////////////////
+/////////////////////////
 // Remote Storage related
-// ///////////////////////
+/////////////////////////
 type CacheRemoteObjectToLocalClusterRequest struct {
 state protoimpl.MessageState
 sizeCache protoimpl.SizeCache
@@ -3886,14 +3886,15 @@ var file_filer_proto_rawDesc = []byte{
 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68,
 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67,
 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73,
-0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x09, 0x46, 0x69,
+0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xf6, 0x02, 0x0a, 0x09, 0x46, 0x69,
 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f,
 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64,
 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
-0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05,
-0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69,
-0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28,
+0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0e,
+0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x04,
+0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x73,
+0x4e, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28,
 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63,
 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a,

weed/pb/volume_server_pb/volume_server.pb.go

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.28.1
-// protoc v3.21.4
+// protoc-gen-go v1.26.0
+// protoc v3.17.3
 // source: volume_server.proto
 package volume_server_pb

weed/pb/volume_server_pb/volume_server_grpc.pb.go

@@ -1,8 +1,4 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v3.21.4
-// source: volume_server.proto
 package volume_server_pb
@@ -22,7 +18,7 @@ const _ = grpc.SupportPackageIsVersion7
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
 type VolumeServerClient interface {
-// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
 BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error)
 VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error)
 VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error)
@@ -692,7 +688,7 @@ func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ...
 // All implementations must embed UnimplementedVolumeServerServer
 // for forward compatibility
 type VolumeServerServer interface {
-// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
+//Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
 BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error)
 VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error)
 VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error

weed/replication/sink/filersink/fetch_write.go

@@ -51,7 +51,7 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path str
 FileId: fileId,
 Offset: sourceChunk.Offset,
 Size: sourceChunk.Size,
-Mtime: sourceChunk.Mtime,
+ModifiedTsNs: sourceChunk.ModifiedTsNs,
 ETag: sourceChunk.ETag,
 SourceFileId: sourceChunk.GetFileIdString(),
 CipherKey: sourceChunk.CipherKey,

weed/s3api/filer_multipart.go

@@ -105,12 +105,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 }
 for _, chunk := range entry.Chunks {
 p := &filer_pb.FileChunk{
-FileId:    chunk.GetFileIdString(),
-Offset:    offset,
-Size:      chunk.Size,
-Mtime:     chunk.Mtime,
-CipherKey: chunk.CipherKey,
-ETag:      chunk.ETag,
+FileId:       chunk.GetFileIdString(),
+Offset:       offset,
+Size:         chunk.Size,
+ModifiedTsNs: chunk.ModifiedTsNs,
+CipherKey:    chunk.CipherKey,
+ETag:         chunk.ETag,
 }
 finalParts = append(finalParts, p)
 offset += int64(chunk.Size)

weed/server/filer_grpc_server_remote.go

@@ -151,10 +151,10 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req
 }
 chunks = append(chunks, &filer_pb.FileChunk{
-FileId: assignResult.Fid,
-Offset: localOffset,
-Size:   uint64(size),
-Mtime:  time.Now().Unix(),
+FileId:       assignResult.Fid,
+Offset:       localOffset,
+Size:         uint64(size),
+ModifiedTsNs: time.Now().Unix(),
 Fid: &filer_pb.FileId{
 VolumeId: uint32(fileId.VolumeId),
 FileKey: uint64(fileId.Key),

weed/shell/command_volume_fsck.go

@@ -209,7 +209,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m
 }
 dataChunks = append(dataChunks, manifestChunks...)
 for _, chunk := range dataChunks {
-if chunk.Mtime > collectMtime {
+if chunk.ModifiedTsNs > collectMtime {
 continue
 }
 outputChan <- &Item{
