From ea2637734a13a08d11d4f26e80c1324664bf7ffc Mon Sep 17 00:00:00 2001 From: chrislu Date: Fri, 28 Oct 2022 12:53:19 -0700 Subject: [PATCH] refactor filer proto chunk variable from mtime to modified_ts_ns --- other/java/client/pom.xml | 2 +- other/java/client/pom.xml.deploy | 2 +- other/java/client/pom_debug.xml | 2 +- .../java/seaweedfs/client/ReadChunks.java | 4 +- .../java/seaweedfs/client/SeaweedWrite.java | 2 +- other/java/client/src/main/proto/filer.proto | 2 +- .../seaweedfs/client/SeaweedReadTest.java | 16 +- other/java/examples/pom.xml | 4 +- other/java/hdfs2/dependency-reduced-pom.xml | 2 +- other/java/hdfs2/pom.xml | 2 +- other/java/hdfs3/dependency-reduced-pom.xml | 2 +- other/java/hdfs3/pom.xml | 2 +- weed/filer/filechunks.go | 16 +- weed/filer/filechunks2_test.go | 36 ++-- weed/filer/filechunks_read.go | 6 +- weed/filer/filechunks_read_test.go | 168 +++++++++--------- weed/filer/filechunks_test.go | 164 ++++++++--------- weed/filer/filer_notify_test.go | 2 +- weed/mount/dirty_pages_chunked.go | 2 +- weed/mount/filehandle.go | 4 +- weed/operation/upload_content.go | 2 +- weed/pb/filer.proto | 2 +- weed/pb/filer_pb/filer.pb.go | 31 ++-- weed/pb/volume_server_pb/volume_server.pb.go | 4 +- .../volume_server_pb/volume_server_grpc.pb.go | 8 +- .../replication/sink/filersink/fetch_write.go | 2 +- weed/s3api/filer_multipart.go | 14 +- weed/server/filer_grpc_server_remote.go | 8 +- weed/shell/command_volume_fsck.go | 2 +- 29 files changed, 255 insertions(+), 258 deletions(-) diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 3ed76436d..5abbf705a 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 3.30 + 3.33 org.sonatype.oss diff --git a/other/java/client/pom.xml.deploy b/other/java/client/pom.xml.deploy index 4b661c6cd..10b76ead4 100644 --- a/other/java/client/pom.xml.deploy +++ b/other/java/client/pom.xml.deploy @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 3.30 + 3.33 org.sonatype.oss diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml index 091271503..1789bc906 100644 --- a/other/java/client/pom_debug.xml +++ b/other/java/client/pom_debug.xml @@ -5,7 +5,7 @@ com.github.chrislusf seaweedfs-client - 3.30 + 3.33 org.sonatype.oss diff --git a/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java b/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java index 2eba4f808..e563c0ccc 100644 --- a/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java +++ b/other/java/client/src/main/java/seaweedfs/client/ReadChunks.java @@ -83,7 +83,7 @@ public class ReadChunks { prevX, point.x, chunk.getFileId(), - chunk.getMtime(), + chunk.getModifiedTsNs(), prevX - chunk.getOffset(), chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x, chunk.getCipherKey().toByteArray(), @@ -100,7 +100,7 @@ public class ReadChunks { public Point(long x, FilerProto.FileChunk chunk, boolean isStart) { this.x = x; - this.ts = chunk.getMtime(); + this.ts = chunk.getModifiedTsNs(); this.chunk = chunk; this.isStart = isStart; } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index 48256a3e6..f477303c9 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -98,7 +98,7 @@ public class SeaweedWrite { .setFileId(fileId) .setOffset(offset) 
.setSize(bytesLength) - .setMtime(System.currentTimeMillis() / 10000L) + .setModifiedTsNs(System.currentTimeMillis() * 1000000L) .setETag(etag) .setCipherKey(cipherKeyString); } } diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 66c6d2a13..821b6c281 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -132,7 +132,7 @@ message FileChunk { string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; - int64 mtime = 4; + int64 modified_ts_ns = 4; string e_tag = 5; string source_file_id = 6; // to be deprecated FileId fid = 7; diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java index 6ad9edb2c..137148425 100644 --- a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java @@ -17,13 +17,13 @@ public class SeaweedReadTest { .setFileId("aaa") .setOffset(0) .setSize(100) - .setMtime(1000) + .setModifiedTsNs(1000) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("bbb") .setOffset(100) .setSize(133) - .setMtime(2000) + .setModifiedTsNs(2000) .build()); List visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks); @@ -70,31 +70,31 @@ public class SeaweedReadTest { .setFileId("a") .setOffset(0) .setSize(100) - .setMtime(1) + .setModifiedTsNs(1) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("b") .setOffset(50) .setSize(100) - .setMtime(2) + .setModifiedTsNs(2) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("c") .setOffset(200) .setSize(50) - .setMtime(3) + .setModifiedTsNs(3) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("d") .setOffset(250) .setSize(50) - .setMtime(4) + .setModifiedTsNs(4) .build()); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("e") .setOffset(175) .setSize(100) - .setMtime(5) + .setModifiedTsNs(5) .build()); List visibleIntervals = ReadChunks.readResolvedChunks(chunks); @@ -161,7 +161,7 @@ public class SeaweedReadTest { .setFileId("") .setOffset(start) .setSize(size) - .setMtime(ts) + .setModifiedTsNs(ts) .build(); } } diff --git a/other/java/examples/pom.xml b/other/java/examples/pom.xml index 3f5ef1cc3..379396734 100644 --- a/other/java/examples/pom.xml +++ b/other/java/examples/pom.xml @@ -11,13 +11,13 @@ com.github.chrislusf seaweedfs-client - 3.30 + 3.33 compile com.github.chrislusf seaweedfs-hadoop2-client - 3.30 + 3.33 compile diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index 3ee5034d0..866ee4915 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -317,7 +317,7 @@ - 3.30 + 3.33 3.2.4 diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index 8bbd4c99e..3a0927905 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 3.30 + 3.33 3.2.4 diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index 33d548ac9..13c0f6182 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -317,7 +317,7 @@ - 3.30 + 3.33 3.2.4 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index 0dd08412f..961af9b2b 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 3.30 + 3.33 3.2.4
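The renamed field is load-bearing: chunk visibility resolution sorts chunks by this timestamp and falls back to the file key on ties, so every writer must fill it with the same unit. Below is a minimal standard-library sketch of that ordering contract; the FileChunk struct here is a simplified stand-in for the generated filer_pb.FileChunk (with FileKey standing in for Fid.FileKey), and the real comparator appears in the weed/filer/filechunks.go hunk that follows.

package main

import (
	"fmt"
	"sort"
)

// FileChunk is a simplified stand-in for the generated filer_pb.FileChunk;
// only the fields that matter for visibility ordering are kept.
type FileChunk struct {
	FileId       string
	ModifiedTsNs int64  // epoch nanoseconds, formerly "mtime"
	FileKey      uint64 // stands in for Fid.FileKey, the tie-breaker
}

func main() {
	chunks := []*FileChunk{
		{FileId: "b", ModifiedTsNs: 2000, FileKey: 2},
		{FileId: "a", ModifiedTsNs: 1000, FileKey: 1},
		{FileId: "c", ModifiedTsNs: 2000, FileKey: 3},
	}
	// Later timestamps win when chunks overlap; equal timestamps fall
	// back to the file key, mirroring NonOverlappingVisibleIntervals.
	sort.Slice(chunks, func(i, j int) bool {
		if chunks[i].ModifiedTsNs == chunks[j].ModifiedTsNs {
			return chunks[i].FileKey < chunks[j].FileKey
		}
		return chunks[i].ModifiedTsNs < chunks[j].ModifiedTsNs
	})
	for _, c := range chunks {
		fmt.Println(c.FileId) // prints a, then b and c (tie broken by FileKey)
	}
}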
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go index 00f4c2921..965c73a77 100644 --- a/weed/filer/filechunks.go +++ b/weed/filer/filechunks.go @@ -192,7 +192,7 @@ func logPrintf(name string, visibles []VisibleInterval) { func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) { - newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed) + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.ModifiedTsNs, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed) length := len(visibles) if length == 0 { @@ -208,12 +208,12 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n chunkStop := chunk.Offset + int64(chunk.Size) for _, v := range visibles { if v.start < chunk.Offset && chunk.Offset < v.stop { - t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped) + t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTsNs, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped) newVisibles = append(newVisibles, t) // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop) } if v.start < chunkStop && chunkStop < v.stop { - t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped) + t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTsNs, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped) newVisibles = append(newVisibles, t) // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop) } @@ -254,7 +254,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction return visibles2, err } slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { - if a.Mtime == b.Mtime { + if a.ModifiedTsNs == b.ModifiedTsNs { filer_pb.EnsureFid(a) filer_pb.EnsureFid(b) if a.Fid == nil || b.Fid == nil { @@ -262,7 +262,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunction } return a.Fid.FileKey < b.Fid.FileKey } - return a.Mtime < b.Mtime + return a.ModifiedTsNs < b.ModifiedTsNs }) for _, chunk := range chunks { @@ -288,7 +288,7 @@ func checkDifference(x, y VisibleInterval) { if x.start != y.start || x.stop != y.stop || x.fileId != y.fileId || - x.modifiedTime != y.modifiedTime { + x.modifiedTsNs != y.modifiedTsNs { fmt.Printf("different visible %+v : %+v\n", x, y) } } @@ -299,7 +299,7 @@ func checkDifference(x, y VisibleInterval) { type VisibleInterval struct { start int64 stop int64 - modifiedTime int64 + modifiedTsNs int64 fileId string chunkOffset int64 chunkSize uint64 @@ -312,7 +312,7 @@ func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, ch start: start, stop: stop, fileId: fileId, - modifiedTime: modifiedTime, + modifiedTsNs: modifiedTime, chunkOffset: chunkOffset, // the starting position in the chunk chunkSize: chunkSize, cipherKey: cipherKey, diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go index 7aa00864b..6966360ad 100644 --- a/weed/filer/filechunks2_test.go +++ b/weed/filer/filechunks2_test.go @@ -17,14 +17,14 @@ func TestDoMinusChunks(t *testing.T) { // clusterA append a new line and then clusterB also append a new line // clusterA append a new line again chunksInA := 
[]*filer_pb.FileChunk{ - {Offset: 0, Size: 3, FileId: "11", Mtime: 100}, - {Offset: 3, Size: 3, FileId: "22", SourceFileId: "2", Mtime: 200}, - {Offset: 6, Size: 3, FileId: "33", Mtime: 300}, + {Offset: 0, Size: 3, FileId: "11", ModifiedTsNs: 100}, + {Offset: 3, Size: 3, FileId: "22", SourceFileId: "2", ModifiedTsNs: 200}, + {Offset: 6, Size: 3, FileId: "33", ModifiedTsNs: 300}, } chunksInB := []*filer_pb.FileChunk{ - {Offset: 0, Size: 3, FileId: "1", SourceFileId: "11", Mtime: 100}, - {Offset: 3, Size: 3, FileId: "2", Mtime: 200}, - {Offset: 6, Size: 3, FileId: "3", SourceFileId: "33", Mtime: 300}, + {Offset: 0, Size: 3, FileId: "1", SourceFileId: "11", ModifiedTsNs: 100}, + {Offset: 3, Size: 3, FileId: "2", ModifiedTsNs: 200}, + {Offset: 6, Size: 3, FileId: "3", SourceFileId: "33", ModifiedTsNs: 300}, } // clusterB using command "echo 'content' > hello.txt" to overwrite file @@ -50,17 +50,17 @@ func TestDoMinusChunks(t *testing.T) { func TestCompactFileChunksRealCase(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497}, - {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492}, - {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928}, - {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894}, - {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900}, - {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904}, - {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910}, - {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903}, - {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911}, - {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909}, - {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922}, + {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, ModifiedTsNs: 5320497}, + {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, ModifiedTsNs: 5320492}, + {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, ModifiedTsNs: 5325928}, + {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, ModifiedTsNs: 5325894}, + {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, ModifiedTsNs: 5325900}, + {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, ModifiedTsNs: 5325904}, + {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, ModifiedTsNs: 5325910}, + {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, ModifiedTsNs: 5325903}, + {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, ModifiedTsNs: 5325911}, + {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, ModifiedTsNs: 5325909}, + {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, ModifiedTsNs: 5325922}, } printChunks("before", chunks) @@ -75,7 +75,7 @@ func TestCompactFileChunksRealCase(t *testing.T) { func printChunks(name string, chunks []*filer_pb.FileChunk) { slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool { if a.Offset == b.Offset { - return a.Mtime < b.Mtime + return a.ModifiedTsNs < b.ModifiedTsNs } return a.Offset < b.Offset }) diff --git a/weed/filer/filechunks_read.go b/weed/filer/filechunks_read.go index 96ea92afb..8a15f6e7a 100644 --- a/weed/filer/filechunks_read.go +++ 
b/weed/filer/filechunks_read.go @@ -11,13 +11,13 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterva for _, chunk := range chunks { points = append(points, &Point{ x: chunk.Offset, - ts: chunk.Mtime, + ts: chunk.ModifiedTsNs, chunk: chunk, isStart: true, }) points = append(points, &Point{ x: chunk.Offset + int64(chunk.Size), - ts: chunk.Mtime, + ts: chunk.ModifiedTsNs, chunk: chunk, isStart: false, }) @@ -98,7 +98,7 @@ func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, p start: prevX, stop: point.x, fileId: chunk.GetFileIdString(), - modifiedTime: chunk.Mtime, + modifiedTsNs: chunk.ModifiedTsNs, chunkOffset: prevX - chunk.Offset, chunkSize: chunk.Size, cipherKey: chunk.CipherKey, diff --git a/weed/filer/filechunks_read_test.go b/weed/filer/filechunks_read_test.go index b3fa52ebd..d4bfca72e 100644 --- a/weed/filer/filechunks_read_test.go +++ b/weed/filer/filechunks_read_test.go @@ -11,41 +11,41 @@ func TestReadResolvedChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ { - FileId: "a", - Offset: 0, - Size: 100, - Mtime: 1, + FileId: "a", + Offset: 0, + Size: 100, + ModifiedTsNs: 1, }, { - FileId: "b", - Offset: 50, - Size: 100, - Mtime: 2, + FileId: "b", + Offset: 50, + Size: 100, + ModifiedTsNs: 2, }, { - FileId: "c", - Offset: 200, - Size: 50, - Mtime: 3, + FileId: "c", + Offset: 200, + Size: 50, + ModifiedTsNs: 3, }, { - FileId: "d", - Offset: 250, - Size: 50, - Mtime: 4, + FileId: "d", + Offset: 250, + Size: 50, + ModifiedTsNs: 4, }, { - FileId: "e", - Offset: 175, - Size: 100, - Mtime: 5, + FileId: "e", + Offset: 175, + Size: 100, + ModifiedTsNs: 5, }, } visibles := readResolvedChunks(chunks) for _, visible := range visibles { - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime) + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) } } @@ -76,8 +76,8 @@ func TestRandomizedReadResolvedChunks(t *testing.T) { for _, visible := range visibles { for i := visible.start; i < visible.stop; i++ { - if array[i] != visible.modifiedTime { - t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime) + if array[i] != visible.modifiedTsNs { + t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTsNs) } } } @@ -92,10 +92,10 @@ func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.Fil } // fmt.Printf("write [%d,%d) %d\n", start, start+size, ts) return &filer_pb.FileChunk{ - FileId: "", - Offset: start, - Size: uint64(size), - Mtime: ts, + FileId: "", + Offset: start, + Size: uint64(size), + ModifiedTsNs: ts, } } @@ -105,10 +105,10 @@ func TestSequentialReadResolvedChunks(t *testing.T) { var chunks []*filer_pb.FileChunk for ts := int64(0); ts < 13; ts++ { chunks = append(chunks, &filer_pb.FileChunk{ - FileId: "", - Offset: chunkSize * ts, - Size: uint64(chunkSize), - Mtime: 1, + FileId: "", + Offset: chunkSize * ts, + Size: uint64(chunkSize), + ModifiedTsNs: 1, }) } @@ -122,89 +122,89 @@ func TestActualReadResolvedChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ { - FileId: "5,e7b96fef48", - Offset: 0, - Size: 2097152, - Mtime: 1634447487595823000, + FileId: "5,e7b96fef48", + Offset: 0, + Size: 2097152, + ModifiedTsNs: 1634447487595823000, }, { - FileId: "5,e5562640b9", - Offset: 2097152, - Size: 2097152, - Mtime: 1634447487595826000, + FileId: "5,e5562640b9", + Offset: 2097152, + Size: 2097152, + ModifiedTsNs: 1634447487595826000, }, { - FileId: "5,df033e0fe4", - 
Offset: 4194304, - Size: 2097152, - Mtime: 1634447487595827000, + FileId: "5,df033e0fe4", + Offset: 4194304, + Size: 2097152, + ModifiedTsNs: 1634447487595827000, }, { - FileId: "7,eb08148a9b", - Offset: 6291456, - Size: 2097152, - Mtime: 1634447487595827000, + FileId: "7,eb08148a9b", + Offset: 6291456, + Size: 2097152, + ModifiedTsNs: 1634447487595827000, }, { - FileId: "7,e0f92d1604", - Offset: 8388608, - Size: 2097152, - Mtime: 1634447487595828000, + FileId: "7,e0f92d1604", + Offset: 8388608, + Size: 2097152, + ModifiedTsNs: 1634447487595828000, }, { - FileId: "7,e33cb63262", - Offset: 10485760, - Size: 2097152, - Mtime: 1634447487595828000, + FileId: "7,e33cb63262", + Offset: 10485760, + Size: 2097152, + ModifiedTsNs: 1634447487595828000, }, { - FileId: "5,ea98e40e93", - Offset: 12582912, - Size: 2097152, - Mtime: 1634447487595829000, + FileId: "5,ea98e40e93", + Offset: 12582912, + Size: 2097152, + ModifiedTsNs: 1634447487595829000, }, { - FileId: "5,e165661172", - Offset: 14680064, - Size: 2097152, - Mtime: 1634447487595829000, + FileId: "5,e165661172", + Offset: 14680064, + Size: 2097152, + ModifiedTsNs: 1634447487595829000, }, { - FileId: "3,e692097486", - Offset: 16777216, - Size: 2097152, - Mtime: 1634447487595830000, + FileId: "3,e692097486", + Offset: 16777216, + Size: 2097152, + ModifiedTsNs: 1634447487595830000, }, { - FileId: "3,e28e2e3cbd", - Offset: 18874368, - Size: 2097152, - Mtime: 1634447487595830000, + FileId: "3,e28e2e3cbd", + Offset: 18874368, + Size: 2097152, + ModifiedTsNs: 1634447487595830000, }, { - FileId: "3,e443974d4e", - Offset: 20971520, - Size: 2097152, - Mtime: 1634447487595830000, + FileId: "3,e443974d4e", + Offset: 20971520, + Size: 2097152, + ModifiedTsNs: 1634447487595830000, }, { - FileId: "2,e815bed597", - Offset: 23068672, - Size: 2097152, - Mtime: 1634447487595831000, + FileId: "2,e815bed597", + Offset: 23068672, + Size: 2097152, + ModifiedTsNs: 1634447487595831000, }, { - FileId: "5,e94715199e", - Offset: 25165824, - Size: 1974736, - Mtime: 1634447487595832000, + FileId: "5,e94715199e", + Offset: 25165824, + Size: 1974736, + ModifiedTsNs: 1634447487595832000, }, } visibles := readResolvedChunks(chunks) for _, visible := range visibles { - fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime) + fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTsNs) } } diff --git a/weed/filer/filechunks_test.go b/weed/filer/filechunks_test.go index c3d3d51c6..d29e0a600 100644 --- a/weed/filer/filechunks_test.go +++ b/weed/filer/filechunks_test.go @@ -15,10 +15,10 @@ import ( func TestCompactFileChunks(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {Offset: 10, Size: 100, FileId: "abc", Mtime: 50}, - {Offset: 100, Size: 100, FileId: "def", Mtime: 100}, - {Offset: 200, Size: 100, FileId: "ghi", Mtime: 200}, - {Offset: 110, Size: 200, FileId: "jkl", Mtime: 300}, + {Offset: 10, Size: 100, FileId: "abc", ModifiedTsNs: 50}, + {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100}, + {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200}, + {Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300}, } compacted, garbage := CompactFileChunks(nil, chunks) @@ -35,22 +35,22 @@ func TestCompactFileChunks(t *testing.T) { func TestCompactFileChunks2(t *testing.T) { chunks := []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 50}, - {Offset: 100, Size: 100, FileId: "def", Mtime: 100}, - {Offset: 200, Size: 100, FileId: "ghi", Mtime: 200}, - {Offset: 0, Size: 100, 
FileId: "abcf", Mtime: 300}, - {Offset: 50, Size: 100, FileId: "fhfh", Mtime: 400}, - {Offset: 100, Size: 100, FileId: "yuyu", Mtime: 500}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50}, + {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100}, + {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200}, + {Offset: 0, Size: 100, FileId: "abcf", ModifiedTsNs: 300}, + {Offset: 50, Size: 100, FileId: "fhfh", ModifiedTsNs: 400}, + {Offset: 100, Size: 100, FileId: "yuyu", ModifiedTsNs: 500}, } k := 3 for n := 0; n < k; n++ { chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n), + Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n), }) chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k), + Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k), }) } @@ -78,11 +78,11 @@ func TestRandomFileChunksCompact(t *testing.T) { stop = start + 16 } chunk := &filer_pb.FileChunk{ - FileId: strconv.Itoa(i), - Offset: int64(start), - Size: uint64(stop - start), - Mtime: int64(i), - Fid: &filer_pb.FileId{FileKey: uint64(i)}, + FileId: strconv.Itoa(i), + Offset: int64(start), + Size: uint64(stop - start), + ModifiedTsNs: int64(i), + Fid: &filer_pb.FileId{FileKey: uint64(i)}, } chunks = append(chunks, chunk) for x := start; x < stop; x++ { @@ -109,9 +109,9 @@ func TestIntervalMerging(t *testing.T) { // case 0: normal { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, - {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, }, Expected: []*VisibleInterval{ {start: 0, stop: 100, fileId: "abc"}, @@ -122,8 +122,8 @@ func TestIntervalMerging(t *testing.T) { // case 1: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "asdf"}, @@ -132,8 +132,8 @@ func TestIntervalMerging(t *testing.T) { // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, - {Offset: 0, Size: 70, FileId: "b", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, + {Offset: 0, Size: 70, FileId: "b", ModifiedTsNs: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 70, fileId: "b"}, @@ -143,9 +143,9 @@ func TestIntervalMerging(t *testing.T) { // case 3: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, - {Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, }, Expected: []*VisibleInterval{ {start: 0, stop: 50, fileId: "asdf"}, @@ -155,9 +155,9 @@ func TestIntervalMerging(t *testing.T) { // case 4: updates far away from 
prev chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, - {Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "asdf"}, @@ -167,10 +167,10 @@ func TestIntervalMerging(t *testing.T) { // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "d", Mtime: 184}, - {Offset: 70, Size: 150, FileId: "c", Mtime: 143}, - {Offset: 80, Size: 100, FileId: "b", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "d", ModifiedTsNs: 184}, + {Offset: 70, Size: 150, FileId: "c", ModifiedTsNs: 143}, + {Offset: 80, Size: 100, FileId: "b", ModifiedTsNs: 134}, }, Expected: []*VisibleInterval{ {start: 0, stop: 200, fileId: "d"}, @@ -180,9 +180,9 @@ func TestIntervalMerging(t *testing.T) { // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, - {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123}, }, Expected: []*VisibleInterval{ {start: 0, stop: 100, fileId: "xyz"}, @@ -191,12 +191,12 @@ func TestIntervalMerging(t *testing.T) { // case 7: real updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123}, - {Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130}, - {Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140}, - {Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150}, - {Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160}, - {Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170}, + {Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", ModifiedTsNs: 123}, + {Offset: 0, Size: 3145728, FileId: "3,029565bf3092", ModifiedTsNs: 130}, + {Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", ModifiedTsNs: 140}, + {Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", ModifiedTsNs: 150}, + {Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", ModifiedTsNs: 160}, + {Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", ModifiedTsNs: 170}, }, Expected: []*VisibleInterval{ {start: 0, stop: 2097152, fileId: "3,029565bf3092"}, @@ -209,11 +209,11 @@ func TestIntervalMerging(t *testing.T) { // case 8: real bug { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 77824, FileId: "4,0b3df938e301", Mtime: 123}, - {Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", Mtime: 130}, - {Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", Mtime: 140}, - {Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", Mtime: 150}, - {Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", Mtime: 160}, + {Offset: 0, Size: 77824, FileId: "4,0b3df938e301", ModifiedTsNs: 123}, + {Offset: 
471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", ModifiedTsNs: 130}, + {Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", ModifiedTsNs: 140}, + {Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", ModifiedTsNs: 150}, + {Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", ModifiedTsNs: 160}, }, Expected: []*VisibleInterval{ {start: 0, stop: 77824, fileId: "4,0b3df938e301"}, @@ -269,9 +269,9 @@ func TestChunksReading(t *testing.T) { // case 0: normal { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, - {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, }, Offset: 0, Size: 250, @@ -284,8 +284,8 @@ func TestChunksReading(t *testing.T) { // case 1: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, }, Offset: 50, Size: 100, @@ -296,8 +296,8 @@ func TestChunksReading(t *testing.T) { // case 2: updates overwrite part of previous chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 3, Size: 100, FileId: "a", Mtime: 123}, - {Offset: 10, Size: 50, FileId: "b", Mtime: 134}, + {Offset: 3, Size: 100, FileId: "a", ModifiedTsNs: 123}, + {Offset: 10, Size: 50, FileId: "b", ModifiedTsNs: 134}, }, Offset: 30, Size: 40, @@ -309,9 +309,9 @@ func TestChunksReading(t *testing.T) { // case 3: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, - {Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, }, Offset: 0, Size: 200, @@ -323,9 +323,9 @@ func TestChunksReading(t *testing.T) { // case 4: updates far away from prev chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "asdf", Mtime: 134}, - {Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154}, }, Offset: 0, Size: 400, @@ -337,10 +337,10 @@ func TestChunksReading(t *testing.T) { // case 5: updates overwrite full chunks { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "a", Mtime: 123}, - {Offset: 0, Size: 200, FileId: "c", Mtime: 184}, - {Offset: 70, Size: 150, FileId: "b", Mtime: 143}, - {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134}, + {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123}, + {Offset: 0, Size: 200, FileId: "c", ModifiedTsNs: 184}, + {Offset: 70, Size: 150, FileId: "b", ModifiedTsNs: 143}, + {Offset: 80, Size: 100, FileId: "xxxx", ModifiedTsNs: 134}, }, Offset: 0, Size: 220, @@ -352,9 +352,9 @@ func TestChunksReading(t *testing.T) { // case 6: same updates { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123}, - {Offset: 0, Size: 100, FileId: "def", Fid: 
&filer_pb.FileId{FileKey: 2}, Mtime: 123}, - {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123}, + {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 123}, + {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 123}, }, Offset: 0, Size: 100, @@ -365,9 +365,9 @@ func TestChunksReading(t *testing.T) { // case 7: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 100, Size: 100, FileId: "asdf", Mtime: 134}, - {Offset: 200, Size: 100, FileId: "fsad", Mtime: 353}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353}, }, Offset: 0, Size: 200, @@ -379,9 +379,9 @@ func TestChunksReading(t *testing.T) { // case 8: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, - {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, - {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123}, + {Offset: 90, Size: 200, FileId: "asdf", ModifiedTsNs: 134}, + {Offset: 190, Size: 300, FileId: "fsad", ModifiedTsNs: 353}, }, Offset: 0, Size: 300, @@ -394,12 +394,12 @@ func TestChunksReading(t *testing.T) { // case 9: edge cases { Chunks: []*filer_pb.FileChunk{ - {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, - {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, - {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, - {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, - {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, - {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", ModifiedTsNs: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", ModifiedTsNs: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", ModifiedTsNs: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", ModifiedTsNs: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", ModifiedTsNs: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", ModifiedTsNs: 6}, }, Offset: 0, Size: 153578836, @@ -455,10 +455,10 @@ func BenchmarkCompactFileChunks(b *testing.B) { for n := 0; n < k; n++ { chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n), + Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n), }) chunks = append(chunks, &filer_pb.FileChunk{ - Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k), + Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k), }) } diff --git a/weed/filer/filer_notify_test.go b/weed/filer/filer_notify_test.go index b85b4c410..9ad58629a 100644 --- a/weed/filer/filer_notify_test.go +++ b/weed/filer/filer_notify_test.go @@ -26,7 +26,7 @@ func TestProtoMarshal(t *testing.T) { FileId: "234,2423423422", Offset: 234234, Size: 234, - Mtime: 12312423, + ModifiedTsNs: 12312423, ETag: "2342342354", 
SourceFileId: "23234,2342342342", }, diff --git a/weed/mount/dirty_pages_chunked.go b/weed/mount/dirty_pages_chunked.go index 9cc17e2f4..803bc29a6 100644 --- a/weed/mount/dirty_pages_chunked.go +++ b/weed/mount/dirty_pages_chunked.go @@ -78,7 +78,7 @@ func (pages *ChunkedDirtyPages) saveChunkedFileIntervalToStorage(reader io.Reade pages.lastErr = err return } - chunk.Mtime = mtime + chunk.ModifiedTsNs = mtime pages.fh.AddChunks([]*filer_pb.FileChunk{chunk}) pages.fh.entryViewCache = nil glog.V(3).Infof("%v saveToStorage %s [%d,%d)", fileFullPath, chunk.FileId, offset, offset+size) diff --git a/weed/mount/filehandle.go b/weed/mount/filehandle.go index aadcb3836..b2e6730c0 100644 --- a/weed/mount/filehandle.go +++ b/weed/mount/filehandle.go @@ -124,8 +124,8 @@ func (fh *FileHandle) Release() { } func lessThan(a, b *filer_pb.FileChunk) bool { - if a.Mtime == b.Mtime { + if a.ModifiedTsNs == b.ModifiedTsNs { return a.Fid.FileKey < b.Fid.FileKey } - return a.Mtime < b.Mtime + return a.ModifiedTsNs < b.ModifiedTsNs } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index c9b15da69..ed38dfa6b 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -51,7 +51,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *fi FileId: fileId, Offset: offset, Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), + ModifiedTsNs: time.Now().UnixNano(), ETag: uploadResult.ContentMd5, CipherKey: uploadResult.CipherKey, IsCompressed: uploadResult.Gzip > 0, diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 66c6d2a13..821b6c281 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -132,7 +132,7 @@ message FileChunk { string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; - int64 mtime = 4; + int64 modified_ts_ns = 4; string e_tag = 5; string source_file_id = 6; // to be deprecated FileId fid = 7; diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 885d702ca..1e7f5bf38 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.4 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: filer.proto package filer_pb @@ -596,7 +596,7 @@ type FileChunk struct { FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"` + ModifiedTsNs int64 `protobuf:"varint,4,opt,name=modified_ts_ns,json=modifiedTsNs,proto3" json:"modified_ts_ns,omitempty"` ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` @@ -659,9 +659,9 @@ func (x *FileChunk) GetSize() uint64 { return 0 } -func (x *FileChunk) GetMtime() int64 { +func (x *FileChunk) GetModifiedTsNs() int64 { if x != nil { - return x.Mtime + return x.ModifiedTsNs } return 0 } @@ -3239,9 +3239,9 @@ func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource { return nil } -// /////////////////////// +///////////////////////// // Key-Value operations -// /////////////////////// +///////////////////////// type KvGetRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3446,9 +3446,9 @@ func (x *KvPutResponse) GetError() string { return "" } -// /////////////////////// +///////////////////////// // path-based configurations -// /////////////////////// +///////////////////////// type FilerConf struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3504,9 +3504,9 @@ func (x *FilerConf) GetLocations() []*FilerConf_PathConf { return nil } -// /////////////////////// +///////////////////////// // Remote Storage related -// /////////////////////// +///////////////////////// type CacheRemoteObjectToLocalClusterRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3886,14 +3886,15 @@ var file_filer_proto_rawDesc = []byte{ 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x09, 0x46, 0x69, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xf6, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0e, + 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 
0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x73, + 0x4e, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 3b21961aa..ac8c34a06 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.4 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: volume_server.proto package volume_server_pb diff --git a/weed/pb/volume_server_pb/volume_server_grpc.pb.go b/weed/pb/volume_server_pb/volume_server_grpc.pb.go index 0ef096d97..17b1eb015 100644 --- a/weed/pb/volume_server_pb/volume_server_grpc.pb.go +++ b/weed/pb/volume_server_pb/volume_server_grpc.pb.go @@ -1,8 +1,4 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.4 -// source: volume_server.proto package volume_server_pb @@ -22,7 +18,7 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type VolumeServerClient interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (VolumeServer_VacuumVolumeCompactClient, error) @@ -692,7 +688,7 @@ func (c *volumeServerClient) Ping(ctx context.Context, in *PingRequest, opts ... // All implementations must embed UnimplementedVolumeServerServer // for forward compatibility type VolumeServerServer interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(*VacuumVolumeCompactRequest, VolumeServer_VacuumVolumeCompactServer) error diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 2acad9e63..cd961f147 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -51,7 +51,7 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path str FileId: fileId, Offset: sourceChunk.Offset, Size: sourceChunk.Size, - Mtime: sourceChunk.Mtime, + ModifiedTsNs: sourceChunk.ModifiedTsNs, ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), CipherKey: sourceChunk.CipherKey, diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 80b7ba0f3..ec0552d2e 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -105,19 +105,19 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } for _, chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.GetFileIdString(), - Offset: offset, - Size: chunk.Size, - Mtime: chunk.Mtime, - CipherKey: chunk.CipherKey, - ETag: chunk.ETag, + FileId: chunk.GetFileIdString(), + Offset: offset, + Size: chunk.Size, + ModifiedTsNs: chunk.ModifiedTsNs, + CipherKey: chunk.CipherKey, + ETag: chunk.ETag, } finalParts = append(finalParts, p) offset += int64(chunk.Size) } } } - + entryName := filepath.Base(*input.Key) dirName := filepath.Dir(*input.Key) if dirName == "." { diff --git a/weed/server/filer_grpc_server_remote.go b/weed/server/filer_grpc_server_remote.go index 879ed62b0..7df61744c 100644 --- a/weed/server/filer_grpc_server_remote.go +++ b/weed/server/filer_grpc_server_remote.go @@ -151,10 +151,10 @@ func (fs *FilerServer) CacheRemoteObjectToLocalCluster(ctx context.Context, req } chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: localOffset, - Size: uint64(size), - Mtime: time.Now().Unix(), + FileId: assignResult.Fid, + Offset: localOffset, + Size: uint64(size), + ModifiedTsNs: time.Now().UnixNano(), Fid: &filer_pb.FileId{ VolumeId: uint32(fileId.VolumeId), FileKey: uint64(fileId.Key), diff --git a/weed/shell/command_volume_fsck.go b/weed/shell/command_volume_fsck.go index 90e215790..854929998 100644 --- a/weed/shell/command_volume_fsck.go +++ b/weed/shell/command_volume_fsck.go @@ -209,7 +209,7 @@ func (c *commandVolumeFsck) collectFilerFileIdAndPaths(dataNodeVolumeIdToVInfo m } dataChunks = append(dataChunks, manifestChunks...) for _, chunk := range dataChunks { - if chunk.Mtime > collectMtime { + if chunk.ModifiedTsNs > collectMtime { continue } outputChan <- &Item{
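Since the field name now carries the unit, producers are expected to supply epoch nanoseconds, as the upload_content.go and filer_grpc_server_remote.go hunks above do with time.Now().UnixNano(). A short standard-library sketch of why the unit matters for this field (illustrative values only, no SeaweedFS APIs involved):

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()

	// What a *_ts_ns field expects: epoch nanoseconds.
	tsNs := now.UnixNano()

	// Epoch seconds are roughly a billion times smaller; a chunk stamped
	// with seconds would sort as far older than any nanosecond-stamped
	// chunk and lose every overlap-resolution comparison.
	tsSec := now.Unix()

	fmt.Println("ns:", tsNs)
	fmt.Println("s: ", tsSec)
	fmt.Println("ratio:", tsNs/tsSec) // ~1,000,000,000
}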