Merge branch 'master' into _product

    # Conflicts:
    #	weed/s3api/s3api_bucket_handlers.go

changlin.shi committed 2 years ago
197 changed files with 4290 additions and 1599 deletions

 .github/workflows/binaries_dev.yml | 12
 .github/workflows/binaries_release0.yml | 6
 .github/workflows/binaries_release1.yml | 6
 .github/workflows/binaries_release2.yml | 6
 .github/workflows/binaries_release3.yml | 6
 .github/workflows/binaries_release4.yml | 6
 .github/workflows/codeql.yml | 2
 .github/workflows/container_dev.yml | 8
 .github/workflows/container_latest.yml | 8
 .github/workflows/container_release1.yml | 8
 .github/workflows/container_release2.yml | 8
 .github/workflows/container_release3.yml | 8
 .github/workflows/container_release4.yml | 8
 .github/workflows/container_release5.yml | 8
 .github/workflows/depsreview.yml | 4
 .github/workflows/e2e.yml | 39
 .github/workflows/go.yml | 2
 .github/workflows/helm_chart_release.yml | 22
 CODE_OF_CONDUCT.md | 74
 README.md | 5
 backers.md | 1
 docker/Dockerfile.go_build | 2
 docker/Dockerfile.rocksdb_dev_env | 2
 docker/Dockerfile.rocksdb_large | 2
 docker/seaweedfs.sql | 14
 go.mod | 149
 go.sum | 770
 helm/index.yaml | 13
 helm/seaweedfs-3.43.tgz | BIN
 k8s/charts/seaweedfs/.helmignore | 0
 k8s/charts/seaweedfs/Chart.yaml | 4
 k8s/charts/seaweedfs/README.md | 14
 k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json | 0
 k8s/charts/seaweedfs/templates/_helpers.tpl | 0
 k8s/charts/seaweedfs/templates/ca-cert.yaml | 0
 k8s/charts/seaweedfs/templates/cert-clusterissuer.yaml | 0
 k8s/charts/seaweedfs/templates/client-cert.yaml | 0
 k8s/charts/seaweedfs/templates/filer-cert.yaml | 0
 k8s/charts/seaweedfs/templates/filer-service-client.yaml | 0
 k8s/charts/seaweedfs/templates/filer-service.yaml | 0
 k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml | 0
 k8s/charts/seaweedfs/templates/filer-statefulset.yaml | 2
 k8s/charts/seaweedfs/templates/ingress.yaml | 0
 k8s/charts/seaweedfs/templates/master-cert.yaml | 0
 k8s/charts/seaweedfs/templates/master-service.yaml | 0
 k8s/charts/seaweedfs/templates/master-servicemonitor.yaml | 0
 k8s/charts/seaweedfs/templates/master-statefulset.yaml | 4
 k8s/charts/seaweedfs/templates/s3-deployment.yaml | 2
 k8s/charts/seaweedfs/templates/s3-service.yaml | 0
 k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml | 0
 k8s/charts/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml | 0
 k8s/charts/seaweedfs/templates/seaweedfs-s3-secret.yaml | 0
 k8s/charts/seaweedfs/templates/secret-seaweedfs-db.yaml | 0
 k8s/charts/seaweedfs/templates/security-configmap.yaml | 0
 k8s/charts/seaweedfs/templates/service-account.yaml | 0
 k8s/charts/seaweedfs/templates/volume-cert.yaml | 0
 k8s/charts/seaweedfs/templates/volume-service.yaml | 0
 k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml | 0
 k8s/charts/seaweedfs/templates/volume-statefulset.yaml | 4
 k8s/charts/seaweedfs/values.yaml | 4
 other/java/s3copier/pom.xml | 4
 other/java/s3copier/src/main/java/com/seaweedfs/s3/HighLevelMultipartUpload.java | 15
 unmaintained/load_test/load_test_meta_tail/load_test_meta_tail.go | 3
 weed/command/filer.go | 1
 weed/command/filer_cat.go | 2
 weed/command/filer_copy.go | 20
 weed/command/filer_meta_backup.go | 2
 weed/command/filer_remote_gateway.go | 2
 weed/command/filer_remote_sync.go | 2
 weed/command/filer_sync.go | 4
 weed/command/iam.go | 2
 weed/command/mount.go | 4
 weed/command/mount_std.go | 3
 weed/command/s3.go | 2
 weed/command/scaffold/filer.toml | 30
 weed/command/scaffold/replication.toml | 1
 weed/command/server.go | 1
 weed/command/webdav.go | 2
 weed/filer/configuration.go | 2
 weed/filer/filechunk_group.go | 155
 weed/filer/filechunk_group_test.go | 36
 weed/filer/filechunk_manifest.go | 4
 weed/filer/filechunk_section.go | 134
 weed/filer/filechunk_section_test.go | 48
 weed/filer/filechunks.go | 218
 weed/filer/filechunks_read.go | 90
 weed/filer/filechunks_read_test.go | 86
 weed/filer/filechunks_test.go | 174
 weed/filer/filer.go | 9
 weed/filer/filer_conf.go | 2
 weed/filer/filer_notify_append.go | 2
 weed/filer/interval_list.go | 259
 weed/filer/interval_list_test.go | 327
 weed/filer/meta_aggregator.go | 16
 weed/filer/mysql/mysql_sql_gen.go | 14
 weed/filer/mysql/mysql_store.go | 4
 weed/filer/mysql2/mysql2_store.go | 4
 weed/filer/reader_at.go | 66
 weed/filer/reader_at_test.go | 72
 weed/filer/reader_cache.go | 11

.github/workflows/helm_chart_release.yml (new file)
@@ -0,0 +1,22 @@
name: "helm: publish charts"
on:
  push:
    tags:
      - '*'

permissions:
  contents: write
  pages: write

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Publish Helm charts
        uses: stefanprodan/helm-gh-pages@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          charts_dir: k8s/charts
          target_dir: helm
          branch: master

CODE_OF_CONDUCT.md (new file)
@@ -0,0 +1,74 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age,
body size, disability, ethnicity, gender identity and expression, level of
experience, nationality, personal appearance, race, religion, or sexual
identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

- The use of sexualized language or imagery and unwelcome sexual attention or
  advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic
  address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an
appointed representative at an online or offline event. Representation of a
project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at <enteremailhere>. All complaints
will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is obligated
to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

go.sum (770 lines changed)
File diff suppressed because it is too large.

helm/index.yaml (new file)
@@ -0,0 +1,13 @@
apiVersion: v1
entries:
  seaweedfs:
  - apiVersion: v1
    appVersion: "3.43"
    created: "2023-02-21T01:05:06.654751634Z"
    description: SeaweedFS
    digest: b8b9071dd8624a06d47b865a0a0e64d1093c2f0406ede47f40019fe9ea7e82a5
    name: seaweedfs
    urls:
    - https://seaweedfs.github.io/seaweedfs/helm/seaweedfs-3.43.tgz
    version: "3.43"
generated: "2023-02-21T01:05:06.652866698Z"

k8s/charts/seaweedfs/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.37"
-version: "3.37"
+appVersion: "3.43"
+version: "3.43"

weed/filer/filechunk_group.go (new file)
@@ -0,0 +1,155 @@
package filer

import (
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
	"github.com/seaweedfs/seaweedfs/weed/wdclient"
)

type ChunkGroup struct {
	lookupFn     wdclient.LookupFileIdFunctionType
	chunkCache   chunk_cache.ChunkCache
	sections     map[SectionIndex]*FileChunkSection
	sectionsLock sync.RWMutex
	readerCache  *ReaderCache
}

func NewChunkGroup(lookupFn wdclient.LookupFileIdFunctionType, chunkCache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk) (*ChunkGroup, error) {
	group := &ChunkGroup{
		lookupFn:    lookupFn,
		chunkCache:  chunkCache,
		sections:    make(map[SectionIndex]*FileChunkSection),
		readerCache: NewReaderCache(32, chunkCache, lookupFn),
	}

	err := group.SetChunks(chunks)
	return group, err
}

func (group *ChunkGroup) AddChunk(chunk *filer_pb.FileChunk) error {

	group.sectionsLock.Lock()
	defer group.sectionsLock.Unlock()

	sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize)
	for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
		section, found := group.sections[si]
		if !found {
			section = NewFileChunkSection(si)
			group.sections[si] = section
		}
		section.addChunk(chunk)
	}
	return nil
}

func (group *ChunkGroup) ReadDataAt(fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) {

	group.sectionsLock.RLock()
	defer group.sectionsLock.RUnlock()

	sectionIndexStart, sectionIndexStop := SectionIndex(offset/SectionSize), SectionIndex((offset+int64(len(buff)))/SectionSize)
	for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
		section, found := group.sections[si]
		rangeStart, rangeStop := max(offset, int64(si*SectionSize)), min(offset+int64(len(buff)), int64((si+1)*SectionSize))
		if !found {
			for i := rangeStart; i < rangeStop; i++ {
				buff[i-offset] = 0
			}
			continue
		}
		xn, xTsNs, xErr := section.readDataAt(group, fileSize, buff[rangeStart-offset:rangeStop-offset], rangeStart)
		if xErr != nil {
			err = xErr
		}
		n += xn
		tsNs = max(tsNs, xTsNs)
	}
	return
}

func (group *ChunkGroup) SetChunks(chunks []*filer_pb.FileChunk) error {
	group.sectionsLock.RLock()
	defer group.sectionsLock.RUnlock()

	var dataChunks []*filer_pb.FileChunk
	for _, chunk := range chunks {

		if !chunk.IsChunkManifest {
			dataChunks = append(dataChunks, chunk)
			continue
		}

		resolvedChunks, err := ResolveOneChunkManifest(group.lookupFn, chunk)
		if err != nil {
			return err
		}

		dataChunks = append(dataChunks, resolvedChunks...)
	}

	sections := make(map[SectionIndex]*FileChunkSection)

	for _, chunk := range dataChunks {
		sectionIndexStart, sectionIndexStop := SectionIndex(chunk.Offset/SectionSize), SectionIndex((chunk.Offset+int64(chunk.Size))/SectionSize)
		for si := sectionIndexStart; si < sectionIndexStop+1; si++ {
			section, found := sections[si]
			if !found {
				section = NewFileChunkSection(si)
				sections[si] = section
			}
			section.chunks = append(section.chunks, chunk)
		}
	}

	group.sections = sections
	return nil
}

const (
	// see weedfs_file_lseek.go
	SEEK_DATA uint32 = 3 // seek to next data after the offset
	// SEEK_HOLE uint32 = 4 // seek to next hole after the offset
)

// FIXME: needs tests
func (group *ChunkGroup) SearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) {
	group.sectionsLock.RLock()
	defer group.sectionsLock.RUnlock()

	return group.doSearchChunks(offset, fileSize, whence)
}

func (group *ChunkGroup) doSearchChunks(offset, fileSize int64, whence uint32) (found bool, out int64) {

	sectionIndex, maxSectionIndex := SectionIndex(offset/SectionSize), SectionIndex(fileSize/SectionSize)
	if whence == SEEK_DATA {
		for si := sectionIndex; si < maxSectionIndex+1; si++ {
			section, foundSection := group.sections[si]
			if !foundSection {
				continue
			}
			sectionStart := section.DataStartOffset(group, offset, fileSize)
			if sectionStart == -1 {
				continue
			}
			return true, sectionStart
		}
		return false, 0
	} else {
		// whence == SEEK_HOLE
		for si := sectionIndex; si < maxSectionIndex; si++ {
			section, foundSection := group.sections[si]
			if !foundSection {
				return true, offset
			}
			holeStart := section.NextStopOffset(group, offset, fileSize)
			if holeStart%SectionSize == 0 {
				continue
			}
			return true, holeStart
		}
		return true, fileSize
	}
}

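For orientation, a minimal usage sketch of the new ChunkGroup API (not part of the diff): it assumes it sits inside the weed/filer package alongside the code above, with a lookup function and chunk cache already wired up; sketchReadWholeFile is a hypothetical helper name.

// Hypothetical helper, not in the commit: drives the new ChunkGroup API.
func sketchReadWholeFile(lookupFn wdclient.LookupFileIdFunctionType, cache chunk_cache.ChunkCache, chunks []*filer_pb.FileChunk, fileSize int64) ([]byte, error) {
	// NewChunkGroup resolves chunk manifests and buckets chunks into sections.
	group, err := NewChunkGroup(lookupFn, cache, chunks)
	if err != nil {
		return nil, err
	}
	buff := make([]byte, fileSize)
	// Sections with no chunks are zero-filled by ReadDataAt, so sparse
	// regions of the file read back as holes.
	if _, _, err := group.ReadDataAt(fileSize, buff, 0); err != nil {
		return nil, err
	}
	return buff, nil
}
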
weed/filer/filechunk_group_test.go (new file)
@@ -0,0 +1,36 @@
package filer

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestChunkGroup_doSearchChunks(t *testing.T) {
	type fields struct {
		sections map[SectionIndex]*FileChunkSection
	}
	type args struct {
		offset   int64
		fileSize int64
		whence   uint32
	}
	tests := []struct {
		name      string
		fields    fields
		args      args
		wantFound bool
		wantOut   int64
	}{
		// TODO: Add test cases.
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			group := &ChunkGroup{
				sections: tt.fields.sections,
			}
			gotFound, gotOut := group.doSearchChunks(tt.args.offset, tt.args.fileSize, tt.args.whence)
			assert.Equalf(t, tt.wantFound, gotFound, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence)
			assert.Equalf(t, tt.wantOut, gotOut, "doSearchChunks(%v, %v, %v)", tt.args.offset, tt.args.fileSize, tt.args.whence)
		})
	}
}

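The table above is committed empty. Two candidate entries one might add, derived directly from the doSearchChunks implementation above (hypothetical, not in the diff): with no sections at all, a data-seek finds nothing, while a hole-seek immediately reports the end of the file.

		// Hypothetical entries for the empty tests table, not in the commit:
		{
			name:      "no sections: SEEK_DATA finds no data",
			fields:    fields{sections: nil},
			args:      args{offset: 0, fileSize: 100, whence: SEEK_DATA},
			wantFound: false,
			wantOut:   0,
		},
		{
			name:      "no sections: hole-seek reports end of file",
			fields:    fields{sections: nil},
			args:      args{offset: 0, fileSize: 100, whence: 4}, // SEEK_HOLE
			wantFound: true,
			wantOut:   100,
		},
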
weed/filer/filechunk_section.go (new file)
@@ -0,0 +1,134 @@
package filer

import (
	"sync"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

const SectionSize = 2 * 1024 * 1024 * 32 // 64MiB

type SectionIndex int64

type FileChunkSection struct {
	sectionIndex     SectionIndex
	chunks           []*filer_pb.FileChunk
	visibleIntervals *IntervalList[*VisibleInterval]
	chunkViews       *IntervalList[*ChunkView]
	reader           *ChunkReadAt
	lock             sync.Mutex
}

func NewFileChunkSection(si SectionIndex) *FileChunkSection {
	return &FileChunkSection{
		sectionIndex: si,
	}
}

func (section *FileChunkSection) addChunk(chunk *filer_pb.FileChunk) error {
	section.lock.Lock()
	defer section.lock.Unlock()

	start, stop := max(int64(section.sectionIndex)*SectionSize, chunk.Offset), min(((int64(section.sectionIndex)+1)*SectionSize), chunk.Offset+int64(chunk.Size))

	section.chunks = append(section.chunks, chunk)

	if section.visibleIntervals == nil {
		section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
	} else {
		MergeIntoVisibles(section.visibleIntervals, start, stop, chunk)
		garbageFileIds := FindGarbageChunks(section.visibleIntervals, start, stop)
		removeGarbageChunks(section, garbageFileIds)
	}

	if section.chunkViews != nil {
		MergeIntoChunkViews(section.chunkViews, start, stop, chunk)
	}

	return nil
}

func removeGarbageChunks(section *FileChunkSection, garbageFileIds map[string]struct{}) {
	for i := 0; i < len(section.chunks); {
		t := section.chunks[i]
		length := len(section.chunks)
		if _, found := garbageFileIds[t.FileId]; found {
			if i < length-1 {
				section.chunks[i] = section.chunks[length-1]
			}
			section.chunks = section.chunks[:length-1]
		} else {
			i++
		}
	}
}

func (section *FileChunkSection) setupForRead(group *ChunkGroup, fileSize int64) {
	if section.visibleIntervals == nil {
		section.visibleIntervals = readResolvedChunks(section.chunks, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
		section.chunks, _ = SeparateGarbageChunks(section.visibleIntervals, section.chunks)
		if section.reader != nil {
			_ = section.reader.Close()
			section.reader = nil
		}
	}
	if section.chunkViews == nil {
		section.chunkViews = ViewFromVisibleIntervals(section.visibleIntervals, int64(section.sectionIndex)*SectionSize, (int64(section.sectionIndex)+1)*SectionSize)
	}

	if section.reader == nil {
		section.reader = NewChunkReaderAtFromClient(group.readerCache, section.chunkViews, min(int64(section.sectionIndex+1)*SectionSize, fileSize))
	}
	section.reader.fileSize = fileSize
}

func (section *FileChunkSection) readDataAt(group *ChunkGroup, fileSize int64, buff []byte, offset int64) (n int, tsNs int64, err error) {
	section.lock.Lock()
	defer section.lock.Unlock()

	section.setupForRead(group, fileSize)

	return section.reader.ReadAtWithTime(buff, offset)
}

func (section *FileChunkSection) DataStartOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
	section.lock.Lock()
	defer section.lock.Unlock()

	section.setupForRead(group, fileSize)

	for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
		visible := x.Value
		if visible.stop <= offset {
			continue
		}
		if offset < visible.start {
			return offset
		}
		return offset
	}
	return -1
}

func (section *FileChunkSection) NextStopOffset(group *ChunkGroup, offset int64, fileSize int64) int64 {
	section.lock.Lock()
	defer section.lock.Unlock()

	section.setupForRead(group, fileSize)

	isAfterOffset := false
	for x := section.visibleIntervals.Front(); x != nil; x = x.Next {
		visible := x.Value
		if !isAfterOffset {
			if visible.stop <= offset {
				continue
			}
			isAfterOffset = true
		}
		if offset < visible.start {
			return offset
		}
		// now visible.start <= offset
		if offset < visible.stop {
			offset = visible.stop
		}
	}
	return offset
}

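To make the section arithmetic concrete: SectionSize is 67,108,864 bytes (64 MiB), so a 100-byte read starting 4 bytes before the first boundary (offset 67,108,860) touches section 0 for [67108860, 67108864) and section 1 for [67108864, 67108960). A small illustration mirroring the clamping done in ChunkGroup.ReadDataAt (not in the diff; assumes the min/max helpers visible to this package and an fmt import):

// Hypothetical illustration, not in the commit: which sections serve a read.
func sketchSectionRange(offset, length int64) {
	first := SectionIndex(offset / SectionSize)
	last := SectionIndex((offset + length) / SectionSize)
	for si := first; si <= last; si++ {
		// Clamp the requested range to this section's window, as ReadDataAt does.
		rangeStart := max(offset, int64(si)*SectionSize)
		rangeStop := min(offset+length, int64(si+1)*SectionSize)
		fmt.Printf("section %d serves [%d, %d)\n", si, rangeStart, rangeStop)
	}
}
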
weed/filer/filechunk_section_test.go (new file)
@@ -0,0 +1,48 @@
package filer

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

func Test_removeGarbageChunks(t *testing.T) {
	section := NewFileChunkSection(0)
	section.chunks = append(section.chunks, &filer_pb.FileChunk{
		FileId:       "0",
		Offset:       0,
		Size:         1,
		ModifiedTsNs: 0,
	})
	section.chunks = append(section.chunks, &filer_pb.FileChunk{
		FileId:       "1",
		Offset:       1,
		Size:         1,
		ModifiedTsNs: 1,
	})
	section.chunks = append(section.chunks, &filer_pb.FileChunk{
		FileId:       "2",
		Offset:       2,
		Size:         1,
		ModifiedTsNs: 2,
	})
	section.chunks = append(section.chunks, &filer_pb.FileChunk{
		FileId:       "3",
		Offset:       3,
		Size:         1,
		ModifiedTsNs: 3,
	})
	section.chunks = append(section.chunks, &filer_pb.FileChunk{
		FileId:       "4",
		Offset:       4,
		Size:         1,
		ModifiedTsNs: 4,
	})
	garbageFileIds := make(map[string]struct{})
	garbageFileIds["0"] = struct{}{}
	garbageFileIds["2"] = struct{}{}
	garbageFileIds["4"] = struct{}{}
	removeGarbageChunks(section, garbageFileIds)
	if len(section.chunks) != 2 {
		t.Errorf("remove chunk 2 failed")
	}
}

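The removeGarbageChunks function exercised above uses the classic swap-with-last deletion: O(n) over the slice with no allocation, at the cost of element order, which appears acceptable here since visibility is decided by chunk timestamps rather than slice position. A generic sketch of the same pattern (hypothetical, not in the commit):

// Hypothetical generic form of the swap-remove loop, not in the commit.
func deleteUnordered[T any](s []T, remove func(T) bool) []T {
	for i := 0; i < len(s); {
		if remove(s[i]) {
			s[i] = s[len(s)-1] // move the last element into the hole
			s = s[:len(s)-1]   // shrink; do not advance i, re-check the swapped-in element
		} else {
			i++
		}
	}
	return s
}
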
weed/filer/interval_list.go (new file)
@@ -0,0 +1,259 @@
package filer

import (
	"math"
	"sync"
)

type IntervalValue interface {
	SetStartStop(start, stop int64)
	Clone() IntervalValue
}

type Interval[T IntervalValue] struct {
	StartOffset int64
	StopOffset  int64
	TsNs        int64
	Value       T
	Prev        *Interval[T]
	Next        *Interval[T]
}

func (interval *Interval[T]) Size() int64 {
	return interval.StopOffset - interval.StartOffset
}

// IntervalList marks written intervals within one page chunk
type IntervalList[T IntervalValue] struct {
	head *Interval[T]
	tail *Interval[T]
	Lock sync.Mutex
}

func NewIntervalList[T IntervalValue]() *IntervalList[T] {
	list := &IntervalList[T]{
		head: &Interval[T]{
			StartOffset: -1,
			StopOffset:  -1,
		},
		tail: &Interval[T]{
			StartOffset: math.MaxInt64,
			StopOffset:  math.MaxInt64,
		},
	}
	return list
}

func (list *IntervalList[T]) Front() (interval *Interval[T]) {
	return list.head.Next
}

func (list *IntervalList[T]) AppendInterval(interval *Interval[T]) {
	list.Lock.Lock()
	defer list.Lock.Unlock()

	if list.head.Next == nil {
		list.head.Next = interval
	}
	interval.Prev = list.tail.Prev
	if list.tail.Prev != nil {
		list.tail.Prev.Next = interval
	}
	list.tail.Prev = interval
}

func (list *IntervalList[T]) Overlay(startOffset, stopOffset, tsNs int64, value T) {
	if startOffset >= stopOffset {
		return
	}
	interval := &Interval[T]{
		StartOffset: startOffset,
		StopOffset:  stopOffset,
		TsNs:        tsNs,
		Value:       value,
	}

	list.Lock.Lock()
	defer list.Lock.Unlock()

	list.overlayInterval(interval)
}

func (list *IntervalList[T]) InsertInterval(startOffset, stopOffset, tsNs int64, value T) {
	interval := &Interval[T]{
		StartOffset: startOffset,
		StopOffset:  stopOffset,
		TsNs:        tsNs,
		Value:       value,
	}

	list.Lock.Lock()
	defer list.Lock.Unlock()

	value.SetStartStop(startOffset, stopOffset)
	list.insertInterval(interval)
}

func (list *IntervalList[T]) insertInterval(interval *Interval[T]) {
	prev := list.head
	next := prev.Next

	for interval.StartOffset < interval.StopOffset {
		if next == nil {
			// add to the end
			list.insertBetween(prev, interval, list.tail)
			break
		}

		// interval is ahead of the next
		if interval.StopOffset <= next.StartOffset {
			list.insertBetween(prev, interval, next)
			break
		}

		// interval is after the next
		if next.StopOffset <= interval.StartOffset {
			prev = next
			next = next.Next
			continue
		}

		// intersecting next and interval
		if interval.TsNs >= next.TsNs {
			// interval is newer
			if next.StartOffset < interval.StartOffset {
				// left side of next is ahead of interval
				t := &Interval[T]{
					StartOffset: next.StartOffset,
					StopOffset:  interval.StartOffset,
					TsNs:        next.TsNs,
					Value:       next.Value.Clone().(T),
				}
				t.Value.SetStartStop(t.StartOffset, t.StopOffset)
				list.insertBetween(prev, t, interval)
				next.StartOffset = interval.StartOffset
				next.Value.SetStartStop(next.StartOffset, next.StopOffset)
				prev = t
			}
			if interval.StopOffset < next.StopOffset {
				// right side of next is after interval
				next.StartOffset = interval.StopOffset
				next.Value.SetStartStop(next.StartOffset, next.StopOffset)
				list.insertBetween(prev, interval, next)
				break
			} else {
				// next is covered
				prev.Next = interval
				next = next.Next
			}
		} else {
			// next is newer
			if interval.StartOffset < next.StartOffset {
				// left side of interval is ahead of next
				t := &Interval[T]{
					StartOffset: interval.StartOffset,
					StopOffset:  next.StartOffset,
					TsNs:        interval.TsNs,
					Value:       interval.Value.Clone().(T),
				}
				t.Value.SetStartStop(t.StartOffset, t.StopOffset)
				list.insertBetween(prev, t, next)
				interval.StartOffset = next.StartOffset
				interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset)
			}
			if next.StopOffset < interval.StopOffset {
				// right side of interval is after next
				interval.StartOffset = next.StopOffset
				interval.Value.SetStartStop(interval.StartOffset, interval.StopOffset)
			} else {
				// interval is covered
				break
			}
		}

	}
}

func (list *IntervalList[T]) insertBetween(a, interval, b *Interval[T]) {
	a.Next = interval
	b.Prev = interval
	if a != list.head {
		interval.Prev = a
	}
	if b != list.tail {
		interval.Next = b
	}
}

func (list *IntervalList[T]) overlayInterval(interval *Interval[T]) {

	//t := list.head
	//for ; t.Next != nil; t = t.Next {
	//	if t.TsNs > interval.TsNs {
	//		println("writes is out of order", t.TsNs-interval.TsNs, "ns")
	//	}
	//}

	p := list.head
	for ; p.Next != nil && p.Next.StopOffset <= interval.StartOffset; p = p.Next {
	}
	q := list.tail
	for ; q.Prev != nil && q.Prev.StartOffset >= interval.StopOffset; q = q.Prev {
	}

	// left side
	// interval after p.Next start
	if p.Next != nil && p.Next.StartOffset < interval.StartOffset {
		t := &Interval[T]{
			StartOffset: p.Next.StartOffset,
			StopOffset:  interval.StartOffset,
			TsNs:        p.Next.TsNs,
			Value:       p.Next.Value,
		}
		p.Next = t
		if p != list.head {
			t.Prev = p
		}
		t.Next = interval
		interval.Prev = t
	} else {
		p.Next = interval
		if p != list.head {
			interval.Prev = p
		}
	}

	// right side
	// interval ends before q.Prev
	if q.Prev != nil && interval.StopOffset < q.Prev.StopOffset {
		t := &Interval[T]{
			StartOffset: interval.StopOffset,
			StopOffset:  q.Prev.StopOffset,
			TsNs:        q.Prev.TsNs,
			Value:       q.Prev.Value,
		}
		q.Prev = t
		if q != list.tail {
			t.Next = q
		}
		interval.Next = t
		t.Prev = interval
	} else {
		q.Prev = interval
		if q != list.tail {
			interval.Next = q
		}
	}
}

func (list *IntervalList[T]) Len() int {
	list.Lock.Lock()
	defer list.Lock.Unlock()

	var count int
	for t := list.head; t != nil; t = t.Next {
		count++
	}
	return count - 1
}

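The key invariant of insertInterval is that, where writes overlap, the interval with the newer TsNs wins and the older one is trimmed to whatever remains uncovered. A minimal sketch (not part of the diff; noopValue is a hypothetical payload type, and an fmt import is assumed); insertInterval7 in the tests below exercises the same trimming across two existing intervals:

// Hypothetical payload with no data, not in the commit.
type noopValue struct{}

func (noopValue) SetStartStop(start, stop int64) {}
func (v noopValue) Clone() IntervalValue         { return v }

func sketchShadowing() {
	list := NewIntervalList[noopValue]()
	list.InsertInterval(50, 150, 2, noopValue{}) // newer write, ts=2
	list.InsertInterval(75, 275, 1, noopValue{}) // older write, ts=1
	// The older write survives only where the newer one does not cover it:
	// prints [50,150) ts=2 then [150,275) ts=1.
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) ts=%d\n", p.StartOffset, p.StopOffset, p.TsNs)
	}
}
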
weed/filer/interval_list_test.go (new file)
@@ -0,0 +1,327 @@
package filer

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type IntervalInt int

func (i IntervalInt) SetStartStop(start, stop int64) {
}
func (i IntervalInt) Clone() IntervalValue {
	return i
}

func TestIntervalList_Overlay(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(0, 100, 1, 1)
	list.Overlay(50, 150, 2, 2)
	list.Overlay(200, 250, 3, 3)
	list.Overlay(225, 250, 4, 4)
	list.Overlay(175, 210, 5, 5)
	list.Overlay(0, 25, 6, 6)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 6, list.Len())
	println()
	list.Overlay(50, 150, 7, 7)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 6, list.Len())
}

func TestIntervalList_Overlay2(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(0, 50, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
}

func TestIntervalList_Overlay3(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	assert.Equal(t, 1, list.Len())

	list.Overlay(0, 60, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_Overlay4(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(0, 100, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 1, list.Len())
}

func TestIntervalList_Overlay5(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(0, 110, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 1, list.Len())
}

func TestIntervalList_Overlay6(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(50, 110, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 1, list.Len())
}

func TestIntervalList_Overlay7(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(50, 90, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_Overlay8(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(60, 90, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 3, list.Len())
}

func TestIntervalList_Overlay9(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(60, 100, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_Overlay10(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(50, 100, 1, 1)
	list.Overlay(60, 110, 2, 2)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_Overlay11(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.Overlay(0, 100, 1, 1)
	list.Overlay(100, 110, 2, 2)
	list.Overlay(0, 90, 3, 3)
	list.Overlay(0, 80, 4, 4)
	list.Overlay(0, 90, 5, 5)
	list.Overlay(90, 90, 6, 6)
	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 3, list.Len())
}

func TestIntervalList_insertInterval1(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_insertInterval2(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(0, 25, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_insertInterval3(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(0, 75, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 3, list.Len())
}

func TestIntervalList_insertInterval4(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(0, 225, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_insertInterval5(t *testing.T) {
	list := NewIntervalList[IntervalInt]()
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(0, 225, 5, 5)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_insertInterval6(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(0, 275, 1, 1)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 5, list.Len())
}

func TestIntervalList_insertInterval7(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(75, 275, 1, 1)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 4, list.Len())
}

func TestIntervalList_insertInterval8(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(75, 275, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 4, list.Len())
}

func TestIntervalList_insertInterval9(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(50, 150, 2, 2)
	list.InsertInterval(200, 250, 4, 4)

	list.InsertInterval(50, 150, 3, 3)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 2, list.Len())
}

func TestIntervalList_insertInterval10(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(50, 100, 2, 2)

	list.InsertInterval(200, 300, 4, 4)

	list.InsertInterval(100, 200, 5, 5)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 3, list.Len())
}

func TestIntervalList_insertInterval11(t *testing.T) {
	list := NewIntervalList[IntervalInt]()

	list.InsertInterval(0, 64, 1, 1)

	list.InsertInterval(72, 136, 3, 3)

	list.InsertInterval(64, 128, 2, 2)

	list.InsertInterval(68, 72, 4, 4)

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 4, list.Len())
}

type IntervalStruct struct {
	x     int
	start int64
	stop  int64
}

func newIntervalStruct(i int) IntervalStruct {
	return IntervalStruct{
		x: i,
	}
}

func (i IntervalStruct) SetStartStop(start, stop int64) {
	i.start, i.stop = start, stop
}
func (i IntervalStruct) Clone() IntervalValue {
	return &IntervalStruct{
		x:     i.x,
		start: i.start,
		stop:  i.stop,
	}
}

func TestIntervalList_insertIntervalStruct(t *testing.T) {
	list := NewIntervalList[IntervalStruct]()

	list.InsertInterval(0, 64, 1, newIntervalStruct(1))

	list.InsertInterval(64, 72, 2, newIntervalStruct(2))

	list.InsertInterval(72, 136, 3, newIntervalStruct(3))

	list.InsertInterval(64, 68, 4, newIntervalStruct(4))

	for p := list.Front(); p != nil; p = p.Next {
		fmt.Printf("[%d,%d) %d %d\n", p.StartOffset, p.StopOffset, p.TsNs, p.Value)
	}
	assert.Equal(t, 4, list.Len())
}

Some files were not shown because too many files changed in this diff