chrislu
2 years ago
77 changed files with 2703 additions and 1938 deletions
  45  go.mod
 106  go.sum
   4  k8s/helm_charts2/Chart.yaml
   3  weed/command/filer_remote_gateway_buckets.go
   2  weed/command/filer_remote_sync.go
  19  weed/command/filer_remote_sync_dir.go
   2  weed/command/filer_sync_jobs.go
   7  weed/command/s3.go
   1  weed/command/server.go
  26  weed/command/volume.go
  10  weed/filer/arangodb/arangodb_store.go
  22  weed/filer/arangodb/helpers.go
  33  weed/filer/arangodb/readme.md
   2  weed/filer/filer_deletion.go
  13  weed/mount/filehandle.go
   2  weed/mount/inode_to_path.go
   9  weed/mount/weedfs_file_copy_range.go
   5  weed/mount/weedfs_file_lseek.go
   5  weed/mount/weedfs_file_read.go
   8  weed/mount/weedfs_file_sync.go
   5  weed/mount/weedfs_file_write.go
   7  weed/operation/upload_content.go
   2  weed/pb/filer_pb/filer.pb.go
   5  weed/pb/filer_pb/filer_grpc.pb.go
   2  weed/pb/iam_pb/iam.pb.go
   5  weed/pb/iam_pb/iam_grpc.pb.go
  16  weed/pb/master.proto
 654  weed/pb/master_pb/master.pb.go
  73  weed/pb/master_pb/master_grpc.pb.go
   5  weed/pb/master_pb/master_helper.go
   2  weed/pb/mount_pb/mount.pb.go
   5  weed/pb/mount_pb/mount_grpc.pb.go
   4  weed/pb/mq_pb/mq_grpc.pb.go
   6  weed/pb/remote_pb/remote.pb.go
   2  weed/pb/s3_pb/s3.pb.go
   5  weed/pb/s3_pb/s3_grpc.pb.go
  15  weed/pb/volume_server.proto
2374  weed/pb/volume_server_pb/volume_server.pb.go
  45  weed/pb/volume_server_pb/volume_server_grpc.pb.go
  25  weed/server/common.go
  27  weed/server/filer_server_handlers_write_upload.go
  49  weed/server/master_grpc_server.go
  26  weed/server/volume_grpc_admin.go
  29  weed/server/volume_grpc_client_to_master.go
  29  weed/server/volume_grpc_read_write.go
   2  weed/server/volume_server_handlers.go
  11  weed/server/volume_server_handlers_read.go
   7  weed/server/volume_server_handlers_write.go
 138  weed/server/volume_server_tcp_handlers_write.go
  41  weed/shell/command_volume_balance.go
   6  weed/shell/command_volume_fix_replication.go
  55  weed/shell/command_volume_fsck.go
   2  weed/shell/command_volume_list_test.go
   0  weed/shell/volume.list.txt
   8  weed/storage/erasure_coding/ec_volume.go
  29  weed/storage/idx/binary_search.go
  57  weed/storage/idx_binary_search_test.go
   4  weed/storage/needle/needle_read.go
  33  weed/storage/needle/needle_read_page.go
  82  weed/storage/needle/needle_read_test.go
  10  weed/storage/needle_map_leveldb.go
   8  weed/storage/store.go
  77  weed/storage/volume_read.go
  91  weed/storage/volume_read_test.go
 105  weed/storage/volume_stream_write.go
  11  weed/topology/configuration.go
   6  weed/topology/data_center.go
  16  weed/topology/data_node.go
   6  weed/topology/disk.go
   8  weed/topology/node.go
   6  weed/topology/rack.go
   2  weed/topology/store_replicate.go
   6  weed/topology/topology.go
   2  weed/util/constants.go
  26  weed/wdclient/masterclient.go
   4  weed/wdclient/vid_map.go
  41  weed/wdclient/vid_map_test.go
k8s/helm_charts2/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.25"
-version: "3.25"
+appVersion: "3.27"
+version: "3.27"
weed/pb/master_pb/master.pb.go (654 lines changed)
File diff suppressed because it is too large.
weed/pb/master_pb/master_helper.go
@@ -0,0 +1,5 @@ (new file)

package master_pb

func (v *VolumeLocation) IsEmptyUrl() bool {
	return v.Url == "" || v.Url == ":0"
}
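A brief illustration of where the new helper fits (a hypothetical caller, not code from this commit): a VolumeLocation can arrive with its Url unset, or left at the ":0" placeholder before the volume server's address is known, and IsEmptyUrl lets callers guard against relaying such a location.

import "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"

// shouldBroadcast is a hypothetical caller, for illustration only; it skips
// locations reported before the volume server knows its real address.
func shouldBroadcast(loc *master_pb.VolumeLocation) bool {
	return !loc.IsEmptyUrl()
}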
weed/pb/volume_server_pb/volume_server.pb.go (2374 lines changed)
File diff suppressed because it is too large.
weed/server/volume_server_tcp_handlers_write.go
@@ -1,138 +0,0 @@ (entire file removed)

package weed_server

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strings"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

func (vs *VolumeServer) HandleTcpConnection(c net.Conn) {
	defer c.Close()

	glog.V(0).Infof("Serving writes from %s", c.RemoteAddr().String())

	bufReader := bufio.NewReaderSize(c, 1024*1024)
	bufWriter := bufio.NewWriterSize(c, 1024*1024)

	for {
		cmd, err := bufReader.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				glog.Errorf("read command from %s: %v", c.RemoteAddr().String(), err)
			}
			return
		}
		cmd = cmd[:len(cmd)-1]
		switch cmd[0] {
		case '+':
			fileId := cmd[1:]
			err = vs.handleTcpPut(fileId, bufReader)
			if err == nil {
				bufWriter.Write([]byte("+OK\n"))
			} else {
				bufWriter.Write([]byte("-ERR " + err.Error() + "\n"))
			}
		case '-':
			fileId := cmd[1:]
			err = vs.handleTcpDelete(fileId)
			if err == nil {
				bufWriter.Write([]byte("+OK\n"))
			} else {
				bufWriter.Write([]byte("-ERR " + err.Error() + "\n"))
			}
		case '?':
			fileId := cmd[1:]
			err = vs.handleTcpGet(fileId, bufWriter)
		case '!':
			bufWriter.Flush()
		}
	}
}

func (vs *VolumeServer) handleTcpGet(fileId string, writer *bufio.Writer) (err error) {
	volumeId, n, err2 := vs.parseFileId(fileId)
	if err2 != nil {
		return err2
	}

	volume := vs.store.GetVolume(volumeId)
	if volume == nil {
		return fmt.Errorf("volume %d not found", volumeId)
	}

	err = volume.StreamRead(n, writer)
	if err != nil {
		return err
	}

	return nil
}

func (vs *VolumeServer) handleTcpPut(fileId string, bufReader *bufio.Reader) (err error) {
	volumeId, n, err2 := vs.parseFileId(fileId)
	if err2 != nil {
		return err2
	}

	volume := vs.store.GetVolume(volumeId)
	if volume == nil {
		return fmt.Errorf("volume %d not found", volumeId)
	}

	sizeBuf := make([]byte, 4)
	if _, err = bufReader.Read(sizeBuf); err != nil {
		return err
	}
	dataSize := util.BytesToUint32(sizeBuf)

	err = volume.StreamWrite(n, bufReader, dataSize)
	if err != nil {
		return err
	}

	return nil
}

func (vs *VolumeServer) handleTcpDelete(fileId string) (err error) {
	volumeId, n, err2 := vs.parseFileId(fileId)
	if err2 != nil {
		return err2
	}

	_, err = vs.store.DeleteVolumeNeedle(volumeId, n)
	if err != nil {
		return err
	}

	return nil
}

func (vs *VolumeServer) parseFileId(fileId string) (needle.VolumeId, *needle.Needle, error) {
	commaIndex := strings.LastIndex(fileId, ",")
	if commaIndex <= 0 {
		return 0, nil, fmt.Errorf("unknown fileId %s", fileId)
	}

	vid, fid := fileId[0:commaIndex], fileId[commaIndex+1:]

	volumeId, ve := needle.NewVolumeId(vid)
	if ve != nil {
		return 0, nil, fmt.Errorf("unknown volume id in fileId %s", fileId)
	}

	n := new(needle.Needle)
	n.ParsePath(fid)
	return volumeId, n, nil
}
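The removed handler defined a small line-oriented wire protocol: "+fileId\n" followed by a 4-byte length and the body to write, "-fileId\n" to delete, "?fileId\n" to read, and "!\n" to flush buffered replies; replies are "+OK\n" or "-ERR ...\n". A minimal client sketch for the write command, assuming the 4-byte length is big-endian to match util.BytesToUint32 (hypothetical code, for illustration only):

package main

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"net"
	"strings"
)

// tcpPut writes one blob using the removed '+' command and reads the reply.
func tcpPut(conn net.Conn, fileId string, data []byte) error {
	w := bufio.NewWriter(conn)
	// command line: '+' followed by the file id, e.g. "3,01637037d6"
	if _, err := w.WriteString("+" + fileId + "\n"); err != nil {
		return err
	}
	// 4-byte data length (big-endian assumed), then the body
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(data)))
	if _, err := w.Write(sizeBuf); err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		return err
	}
	// '!' asks the server to flush its buffered replies
	if _, err := w.WriteString("!\n"); err != nil {
		return err
	}
	if err := w.Flush(); err != nil {
		return err
	}
	reply, err := bufio.NewReader(conn).ReadString('\n')
	if err != nil {
		return err
	}
	if !strings.HasPrefix(reply, "+OK") {
		return fmt.Errorf("put %s: %s", fileId, strings.TrimSpace(reply))
	}
	return nil
}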
weed/storage/idx/binary_search.go
@@ -0,0 +1,29 @@ (new file)

package idx

import (
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

// FirstInvalidIndex returns the index of the first entry that fails the
// lessThanOrEqualToFn predicate, assuming entries are ordered so that all
// valid entries precede all invalid ones.
func FirstInvalidIndex(bytes []byte, lessThanOrEqualToFn func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error)) (int, error) {
	left, right := 0, len(bytes)/types.NeedleMapEntrySize-1
	index := right + 1
	for left <= right {
		mid := left + (right-left)>>1
		loc := mid * types.NeedleMapEntrySize
		key := types.BytesToNeedleId(bytes[loc : loc+types.NeedleIdSize])
		offset := types.BytesToOffset(bytes[loc+types.NeedleIdSize : loc+types.NeedleIdSize+types.OffsetSize])
		size := types.BytesToSize(bytes[loc+types.NeedleIdSize+types.OffsetSize : loc+types.NeedleIdSize+types.OffsetSize+types.SizeSize])
		res, err := lessThanOrEqualToFn(key, offset, size)
		if err != nil {
			return -1, err
		}
		if res {
			left = mid + 1
		} else {
			index = mid
			right = mid - 1
		}
	}
	return index, nil
}
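A sketch of how the new search could be applied (the helper name and the validity rule here are assumptions for illustration, not code from this commit): truncate a .idx file at the first entry that no longer satisfies a predicate, exploiting the fact that index entries are appended in order.

package main

import (
	"os"

	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

// truncateIdx cuts an index file at the first invalid entry. The validity
// rule (the needle must lie entirely within the data file) is a simplified
// stand-in for whatever check a real caller would apply.
func truncateIdx(idxFileName string, datFileSize int64) error {
	b, err := os.ReadFile(idxFileName)
	if err != nil {
		return err
	}
	firstInvalid, err := idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return offset.ToActualOffset()+int64(size) <= datFileSize, nil
	})
	if err != nil {
		return err
	}
	// each entry is types.NeedleMapEntrySize (16) bytes, so the byte cutoff
	// is simply firstInvalid * entry size
	return os.Truncate(idxFileName, int64(firstInvalid*types.NeedleMapEntrySize))
}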
weed/storage/idx_binary_search_test.go
@@ -0,0 +1,57 @@ (new file)

package storage

import (
	"os"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/stretchr/testify/assert"
)

func TestFirstInvalidIndex(t *testing.T) {
	dir := t.TempDir()

	v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
	if err != nil {
		t.Fatalf("volume creation: %v", err)
	}
	// write 30 needles with keys 1..30
	for i := 1; i <= 30; i++ {
		n := newRandomNeedle(uint64(i))
		n.Flags = 0x08
		if _, _, _, err := v.writeNeedle2(n, true, false); err != nil {
			t.Fatalf("write needle %d: %v", i, err)
		}
	}
	b, err := os.ReadFile(v.IndexFileName() + ".idx")
	if err != nil {
		t.Fatalf("read index file: %v", err)
	}
	// base case: every record is valid, so nothing is filtered
	index, err := idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return true, nil
	})
	if err != nil {
		t.Fatalf("failed to complete binary search: %v", err)
	}
	assert.Equal(t, 30, index, "when every record is valid, nothing should be filtered by the binary search")

	index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return false, nil
	})
	assert.Nil(t, err)
	assert.Equal(t, 0, index, "when every record is invalid, everything should be filtered by the binary search")

	index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return key < 20, nil
	})
	assert.Nil(t, err)
	// needle keys range from 1 to 30, so key < 20 leaves 19 valid entries and the byte cutoff is 19 * 16 = 304
	assert.Equal(t, 19, index, "keys below 20 should leave the first 19 records valid")

	index, err = idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return key <= 1, nil
	})
	assert.Nil(t, err)
	// needle keys range from 1 to 30, so key <= 1 leaves a single valid entry and the byte cutoff is 1 * 16 = 16
	assert.Equal(t, 1, index, "only the first record should remain valid for key <= 1")
}
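One path the test above does not exercise is a failing predicate; FirstInvalidIndex aborts the search and reports -1 in that case. A hypothetical companion test (same package as above; it would additionally need "fmt" in the imports):

// TestFirstInvalidIndexPredicateError checks that a predicate error aborts
// the search and is surfaced to the caller, with -1 as the index.
func TestFirstInvalidIndexPredicateError(t *testing.T) {
	b := make([]byte, types.NeedleMapEntrySize) // a single zeroed entry
	index, err := idx.FirstInvalidIndex(b, func(key types.NeedleId, offset types.Offset, size types.Size) (bool, error) {
		return false, fmt.Errorf("simulated corrupt entry")
	})
	assert.Equal(t, -1, index, "a predicate error should abort the search")
	assert.NotNil(t, err)
}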
weed/storage/needle/needle_read_test.go
@@ -1,82 +0,0 @@ (entire file removed)

package needle

import (
	"fmt"
	"io"
	"os"
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func TestPageRead(t *testing.T) {
	baseFileName := "43"
	offset := int64(8)
	size := types.Size(1153890) // actual file size 1153862

	datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0644)
	if err != nil {
		t.Fatalf("Open Volume Data File [ERROR]: %v", err)
	}
	datBackend := backend.NewDiskFile(datFile)
	defer datBackend.Close()
	{
		n := new(Needle)

		bytes, err := ReadNeedleBlob(datBackend, offset, size, Version3)
		if err != nil {
			t.Fatalf("readNeedleBlob: %v", err)
		}
		if err = n.ReadBytes(bytes, offset, size, Version3); err != nil {
			t.Fatalf("readNeedleBlob: %v", err)
		}

		fmt.Printf("bytes len %d\n", len(bytes))
		fmt.Printf("name %s size %d\n", n.Name, n.Size)

		fmt.Printf("id %d\n", n.Id)
		fmt.Printf("DataSize %d\n", n.DataSize)
		fmt.Printf("Flags %v\n", n.Flags)
		fmt.Printf("NameSize %d\n", n.NameSize)
		fmt.Printf("MimeSize %d\n", n.MimeSize)
		fmt.Printf("PairsSize %d\n", n.PairsSize)
		fmt.Printf("LastModified %d\n", n.LastModified)
		fmt.Printf("AppendAtNs %d\n", n.AppendAtNs)
		fmt.Printf("Checksum %d\n", n.Checksum)
	}

	{
		n, bytes, bodyLength, err := ReadNeedleHeader(datBackend, Version3, offset)
		if err != nil {
			t.Fatalf("ReadNeedleHeader: %v", err)
		}
		fmt.Printf("bytes len %d\n", len(bytes))
		fmt.Printf("name %s size %d bodyLength:%d\n", n.Name, n.Size, bodyLength)
	}

	{
		n := new(Needle)
		err := n.ReadNeedleMeta(datBackend, offset, size, Version3)
		if err != nil {
			t.Fatalf("ReadNeedleHeader: %v", err)
		}
		fmt.Printf("name %s size %d\n", n.Name, n.Size)
		fmt.Printf("id %d\n", n.Id)
		fmt.Printf("DataSize %d\n", n.DataSize)
		fmt.Printf("Flags %v\n", n.Flags)
		fmt.Printf("NameSize %d\n", n.NameSize)
		fmt.Printf("MimeSize %d\n", n.MimeSize)
		fmt.Printf("PairsSize %d\n", n.PairsSize)
		fmt.Printf("LastModified %d\n", n.LastModified)
		fmt.Printf("AppendAtNs %d\n", n.AppendAtNs)
		fmt.Printf("Checksum %d\n", n.Checksum)

		buf := make([]byte, 1024)
		if err = n.ReadNeedleDataInto(datBackend, offset, buf, io.Discard, 0, int64(n.DataSize)); err != nil {
			t.Fatalf("ReadNeedleDataInto: %v", err)
		}
	}
}
weed/storage/volume_read_test.go
@@ -0,0 +1,91 @@ (new file)

package storage

import (
	"testing"

	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	"github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/stretchr/testify/assert"
)

func TestReadNeedMetaWithWritesAndUpdates(t *testing.T) {
	dir := t.TempDir()

	v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
	if err != nil {
		t.Fatalf("volume creation: %v", err)
	}
	type WriteInfo struct {
		offset int64
		size   int32
	}
	writeInfos := make([]WriteInfo, 30)
	mockLastUpdateTime := uint64(1000000000000)
	// write 30 needles; keys repeat modulo 20, so the first 10 keys are written twice (an update)
	for i := 1; i <= 30; i++ {
		n := newRandomNeedle(uint64(i % 20))
		n.Flags = 0x08
		n.LastModified = mockLastUpdateTime
		mockLastUpdateTime += 2000
		offset, _, _, err := v.writeNeedle2(n, true, false)
		if err != nil {
			t.Fatalf("write needle %d: %v", i, err)
		}
		writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(n.Size)}
	}
	expectedLastUpdateTime := uint64(1000000000000)
	for i := 0; i < 30; i++ {
		testNeedle := new(needle.Needle)
		testNeedle.Id = types.Uint64ToNeedleId(uint64((i + 1) % 20))
		testNeedle.Flags = 0x08
		v.readNeedleMetaAt(testNeedle, writeInfos[i].offset, writeInfos[i].size)
		actualLastModifiedTime := testNeedle.LastModified
		assert.Equal(t, expectedLastUpdateTime, actualLastModifiedTime, "the LastModified timestamps should match")
		expectedLastUpdateTime += 2000
	}
}

func TestReadNeedMetaWithDeletesThenWrites(t *testing.T) {
	dir := t.TempDir()

	v, err := NewVolume(dir, dir, "", 1, NeedleMapInMemory, &super_block.ReplicaPlacement{}, &needle.TTL{}, 0, 0)
	if err != nil {
		t.Fatalf("volume creation: %v", err)
	}
	type WriteInfo struct {
		offset int64
		size   int32
	}
	writeInfos := make([]WriteInfo, 10)
	mockLastUpdateTime := uint64(1000000000000)
	for i := 1; i <= 10; i++ {
		n := newRandomNeedle(uint64(i % 5))
		n.Flags = 0x08
		n.LastModified = mockLastUpdateTime
		mockLastUpdateTime += 2000
		offset, _, _, err := v.writeNeedle2(n, true, false)
		if err != nil {
			t.Fatalf("write needle %d: %v", i, err)
		}
		if i < 5 {
			size, err := v.deleteNeedle2(n)
			if err != nil {
				t.Fatalf("delete needle %d: %v", i, err)
			}
			writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(size)}
		} else {
			writeInfos[i-1] = WriteInfo{offset: int64(offset), size: int32(n.Size)}
		}
	}

	expectedLastUpdateTime := uint64(1000000000000)
	for i := 0; i < 10; i++ {
		testNeedle := new(needle.Needle)
		testNeedle.Id = types.Uint64ToNeedleId(uint64((i + 1) % 5))
		testNeedle.Flags = 0x08
		v.readNeedleMetaAt(testNeedle, writeInfos[i].offset, writeInfos[i].size)
		actualLastModifiedTime := testNeedle.LastModified
		assert.Equal(t, expectedLastUpdateTime, actualLastModifiedTime, "the LastModified timestamps should match")
		expectedLastUpdateTime += 2000
	}
}
weed/storage/volume_stream_write.go
@@ -1,105 +0,0 @@ (entire file removed)

package storage

import (
	"bufio"
	"fmt"
	"io"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/util"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	. "github.com/seaweedfs/seaweedfs/weed/storage/types"
)

func (v *Volume) StreamWrite(n *needle.Needle, data io.Reader, dataSize uint32) (err error) {

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	df, ok := v.DataBackend.(*backend.DiskFile)
	if !ok {
		return fmt.Errorf("unexpected volume backend")
	}
	offset, _, _ := v.DataBackend.GetStat()

	header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
	CookieToBytes(header[0:CookieSize], n.Cookie)
	NeedleIdToBytes(header[CookieSize:CookieSize+NeedleIdSize], n.Id)
	n.Size = 4 + Size(dataSize) + 1
	SizeToBytes(header[CookieSize+NeedleIdSize:CookieSize+NeedleIdSize+SizeSize], n.Size)

	n.DataSize = dataSize

	// needle header
	df.Write(header[0:NeedleHeaderSize])

	// data size and data
	util.Uint32toBytes(header[0:4], n.DataSize)
	df.Write(header[0:4])
	// write and calculate CRC
	crcWriter := needle.NewCRCwriter(df)
	io.Copy(crcWriter, io.LimitReader(data, int64(dataSize)))

	// flags
	util.Uint8toBytes(header[0:1], n.Flags)
	df.Write(header[0:1])

	// data checksum
	util.Uint32toBytes(header[0:needle.NeedleChecksumSize], crcWriter.Sum())
	// write timestamp, padding
	n.AppendAtNs = uint64(time.Now().UnixNano())
	util.Uint64toBytes(header[needle.NeedleChecksumSize:needle.NeedleChecksumSize+TimestampSize], n.AppendAtNs)
	padding := needle.PaddingLength(n.Size, needle.Version3)
	df.Write(header[0 : needle.NeedleChecksumSize+TimestampSize+padding])

	// add to needle map
	if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
		glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
	}
	return
}

func (v *Volume) StreamRead(n *needle.Needle, writer io.Writer) (err error) {

	v.dataFileAccessLock.Lock()
	defer v.dataFileAccessLock.Unlock()

	nv, ok := v.nm.Get(n.Id)
	if !ok || nv.Offset.IsZero() {
		return ErrorNotFound
	}

	sr := &StreamReader{
		readerAt: v.DataBackend,
		offset:   nv.Offset.ToActualOffset(),
	}
	bufReader := bufio.NewReader(sr)
	bufReader.Discard(NeedleHeaderSize)
	sizeBuf := make([]byte, 4)
	bufReader.Read(sizeBuf)
	if _, err = writer.Write(sizeBuf); err != nil {
		return err
	}
	dataSize := util.BytesToUint32(sizeBuf)

	_, err = io.Copy(writer, io.LimitReader(bufReader, int64(dataSize)))

	return
}

type StreamReader struct {
	offset   int64
	readerAt io.ReaderAt
}

func (sr *StreamReader) Read(p []byte) (n int, err error) {
	n, err = sr.readerAt.ReadAt(p, sr.offset)
	if err != nil {
		return
	}
	sr.offset += int64(n)
	return
}
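For reference, the on-disk record that StreamWrite assembled above, reconstructed from the calls in the deleted code (a sketch; the 8-byte alignment of the padding is an assumption based on needle.PaddingLength):

// Version-3 needle record as emitted by the removed StreamWrite:
//
//	cookie      4 bytes  \
//	needle id   8 bytes   } NeedleHeaderSize (16)
//	size        4 bytes  /
//	data size   4 bytes
//	data        dataSize bytes (CRC accumulated while streaming)
//	flags       1 byte
//	checksum    4 bytes  (needle.NeedleChecksumSize)
//	appendAtNs  8 bytes  (TimestampSize)
//	padding     0..7 bytes (needle.PaddingLength, 8-byte alignment assumed)
//
// n.Size = 4 + dataSize + 1 counts the data-size field, the body, and the
// flags byte, matching the assignment in StreamWrite above.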