
Merge branch 'master' of https://github.com/hans-strudle/seaweedfs

pull/729/head · jenkins.ow committed 6 years ago · commit 1690a080b2
Changed files (total lines changed in parentheses):

  1. weed/filer2/filechunks.go (14)
  2. weed/filesys/dirty_page.go (93)
  3. weed/filesys/filehandle.go (5)
  4. weed/pb/filer.proto (4)
  5. weed/pb/filer_pb/filer.pb.go (178)
  6. weed/s3api/filer_multipart.go (123)
  7. weed/s3api/filer_util.go (44)
  8. weed/s3api/s3api_bucket_handlers.go (2)
  9. weed/s3api/s3api_object_multipart_handlers.go (14)
  10. weed/s3api/s3api_objects_list_handlers.go (2)
  11. weed/s3api/s3api_server.go (2)
  12. weed/server/filer_server_handlers_read.go (1)
  13. weed/server/filer_server_handlers_write.go (2)
  14. weed/server/volume_server_handlers_read.go (7)
  15. weed/server/volume_server_handlers_write.go (9)
  16. weed/storage/crc.go (2)
  17. weed/storage/volume_loading.go (11)
  18. weed/util/constants.go (2)

weed/filer2/filechunks.go (14 lines changed)

@@ -1,6 +1,8 @@
 package filer2

 import (
+    "fmt"
+    "hash/fnv"
     "math"
     "sort"
@@ -17,6 +19,18 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
     return
 }

+func ETag(chunks []*filer_pb.FileChunk) (etag string) {
+    if len(chunks) == 1 {
+        return chunks[0].ETag
+    }
+
+    h := fnv.New32a()
+    for _, c := range chunks {
+        h.Write([]byte(c.ETag))
+    }
+
+    return fmt.Sprintf("%x", h.Sum32())
+}
+
 func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
     visibles := nonOverlappingVisibleIntervals(chunks)

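The new filer2.ETag helper gives a whole file a deterministic ETag: a single chunk keeps its own ETag, and multiple chunks are folded through an FNV-32a hash of their per-chunk ETags. A minimal standalone sketch of that rule, using a stand-in chunk type rather than filer_pb.FileChunk:

package main

import (
    "fmt"
    "hash/fnv"
)

type chunk struct{ ETag string }

// etagOf mirrors the rule above: one chunk keeps its ETag,
// several chunks are combined into an FNV-32a digest.
func etagOf(chunks []chunk) string {
    if len(chunks) == 1 {
        return chunks[0].ETag
    }
    h := fnv.New32a()
    for _, c := range chunks {
        h.Write([]byte(c.ETag))
    }
    return fmt.Sprintf("%x", h.Sum32())
}

func main() {
    fmt.Println(etagOf([]chunk{{ETag: "ab12"}}))                 // single chunk: ab12
    fmt.Println(etagOf([]chunk{{ETag: "ab12"}, {ETag: "cd34"}})) // combined FNV-32a digest
}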
weed/filesys/dirty_page.go (93 lines changed)

@@ -9,6 +9,7 @@ import (
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/operation"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "sync"
 )

 type ContinuousDirtyPages struct {
@@ -17,6 +18,7 @@ type ContinuousDirtyPages struct {
     Size   int64
     Data   []byte
     f      *File
+    lock   sync.Mutex
 }

 func newDirtyPages(file *File) *ContinuousDirtyPages {
@@ -28,35 +30,14 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {

 func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {

+    pages.lock.Lock()
+    defer pages.lock.Unlock()
+
     var chunk *filer_pb.FileChunk

     if len(data) > len(pages.Data) {
         // this is more than what buffer can hold.
-        // flush existing
-        if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
-            if chunk != nil {
-                glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
-            }
-            chunks = append(chunks, chunk)
-        } else {
-            glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
-            return
-        }
-        pages.Size = 0
-
-        // flush the big page
-        if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
-            if chunk != nil {
-                glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
-                chunks = append(chunks, chunk)
-            }
-        } else {
-            glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
-            return
-        }
-
-        return
+        return pages.flushAndSave(ctx, offset, data)
     }

     if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||
@@ -77,25 +58,74 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
             return
         }
         pages.Offset = offset
-        pages.Size = int64(len(data))
         copy(pages.Data, data)
+        pages.Size = int64(len(data))
         return
     }

-    copy(pages.Data[offset-pages.Offset:], data)
+    if offset != pages.Offset+pages.Size {
+        // when this happens, debug shows the data overlapping with existing data is empty
+        // the data is not just append
+        if offset == pages.Offset {
+            copy(pages.Data[pages.Size:], data[pages.Size:])
+        } else {
+            if pages.Size != 0 {
+                glog.V(0).Infof("possible error: pages [%d, %d) write [%d, %d)", pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data)))
+            }
+            return pages.flushAndSave(ctx, offset, data)
+        }
+    } else {
+        copy(pages.Data[offset-pages.Offset:], data)
+    }

     pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)
     return
 }

+func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
+
+    var chunk *filer_pb.FileChunk
+
+    // flush existing
+    if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
+        if chunk != nil {
+            glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
+            chunks = append(chunks, chunk)
+        }
+    } else {
+        glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
+        return
+    }
+    pages.Size = 0
+    pages.Offset = 0
+
+    // flush the new page
+    if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {
+        if chunk != nil {
+            glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
+            chunks = append(chunks, chunk)
+        }
+    } else {
+        glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
+        return
+    }
+
+    return
+}
+
 func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {

+    pages.lock.Lock()
+    defer pages.lock.Unlock()
+
     if pages.Size == 0 {
         return nil, nil
     }

     if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {
         pages.Size = 0
+        pages.Offset = 0
         if chunk != nil {
             glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))
         }
@@ -104,15 +134,16 @@ func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *f
 }

 func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {
-    return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)
-}
-
-func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {

     if pages.Size == 0 {
         return nil, nil
     }

+    return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)
+}
+
+func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {
+
     var fileId, host string

     if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

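The dirty-page buffer now serializes AddPage and FlushToStorage with a mutex, and flushes whenever a write is larger than the buffer or is not a plain append to the buffered range. A simplified, hypothetical model of that decision logic (the real code also saves the flushed chunk and tolerates overlapping rewrites of the buffered range):

package main

import "fmt"

type dirtyBuffer struct {
    offset int64
    size   int64
    data   []byte
}

// addPage reports whether the buffered bytes would have to be flushed first,
// roughly mirroring the flushAndSave path in the real AddPage.
func (b *dirtyBuffer) addPage(offset int64, p []byte) (flushed bool) {
    if len(p) > len(b.data) {
        // larger than the whole buffer: flush and write through
        return true
    }
    if b.size != 0 && offset != b.offset+b.size {
        // not an append to the buffered range: flush first
        return true
    }
    if b.size == 0 {
        b.offset = offset
    }
    copy(b.data[offset-b.offset:], p)
    if end := offset + int64(len(p)) - b.offset; end > b.size {
        b.size = end
    }
    return false
}

func main() {
    b := &dirtyBuffer{data: make([]byte, 8)}
    fmt.Println(b.addPage(0, []byte("abcd")))  // false: buffered
    fmt.Println(b.addPage(4, []byte("efgh")))  // false: contiguous append
    fmt.Println(b.addPage(100, []byte("xy")))  // true: non-contiguous, would flush
}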
weed/filesys/filehandle.go (5 lines changed)

@@ -140,6 +140,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
     chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
     if err != nil {
+        glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
         return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err)
     }
@@ -179,7 +180,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
     chunk, err := fh.dirtyPages.FlushToStorage(ctx)
     if err != nil {
-        glog.V(0).Infof("flush %s/%s to %s [%d,%d): %v", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
+        glog.Errorf("flush %s/%s to %s [%d,%d): %v", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
         return fmt.Errorf("flush %s/%s to %s [%d,%d): %v", fh.f.dir.Path, fh.f.Name, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size), err)
     }
     if chunk != nil {
@@ -200,6 +201,8 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
     if fh.f.attributes != nil {
         fh.f.attributes.Mime = fh.contentType
+        fh.f.attributes.Uid = req.Uid
+        fh.f.attributes.Gid = req.Gid
     }

     request := &filer_pb.UpdateEntryRequest{

weed/pb/filer.proto (4 lines changed)

@@ -63,7 +63,7 @@ message Entry {
     bool is_directory = 2;
     repeated FileChunk chunks = 3;
     FuseAttributes attributes = 4;
-    map<string, string> extended = 5;
+    map<string, bytes> extended = 5;
 }

 message EventNotification {
@@ -76,6 +76,7 @@ message FileChunk {
     int64 offset = 2;
     uint64 size = 3;
     int64 mtime = 4;
+    string e_tag = 5;
 }

 message FuseAttributes {
@@ -100,6 +101,7 @@ message GetEntryAttributesRequest {
 message GetEntryAttributesResponse {
     FuseAttributes attributes = 1;
     repeated FileChunk chunks = 2;
+    map<string, bytes> extended = 3;
 }

 message GetFileContentRequest {

weed/pb/filer_pb/filer.pb.go (178 lines changed)

@@ -167,7 +167,7 @@ type Entry struct {
     IsDirectory bool              `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"`
     Chunks      []*FileChunk      `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"`
     Attributes  *FuseAttributes   `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"`
-    Extended    map[string]string `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+    Extended    map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
 }

 func (m *Entry) Reset() { *m = Entry{} }
@@ -203,7 +203,7 @@ func (m *Entry) GetAttributes() *FuseAttributes {
     return nil
 }

-func (m *Entry) GetExtended() map[string]string {
+func (m *Entry) GetExtended() map[string][]byte {
     if m != nil {
         return m.Extended
     }
@@ -239,6 +239,7 @@ type FileChunk struct {
     Offset int64  `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"`
     Size   uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"`
     Mtime  int64  `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"`
+    ETag   string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"`
 }

 func (m *FileChunk) Reset() { *m = FileChunk{} }
@@ -274,6 +275,13 @@ func (m *FileChunk) GetMtime() int64 {
     return 0
 }

+func (m *FileChunk) GetETag() string {
+    if m != nil {
+        return m.ETag
+    }
+    return ""
+}
+
 type FuseAttributes struct {
     FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"`
     Mtime    int64  `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"`
@@ -395,8 +403,9 @@ func (m *GetEntryAttributesRequest) GetFileId() string {
 }

 type GetEntryAttributesResponse struct {
-    Attributes *FuseAttributes `protobuf:"bytes,1,opt,name=attributes" json:"attributes,omitempty"`
-    Chunks     []*FileChunk    `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"`
+    Attributes *FuseAttributes   `protobuf:"bytes,1,opt,name=attributes" json:"attributes,omitempty"`
+    Chunks     []*FileChunk      `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"`
+    Extended   map[string][]byte `protobuf:"bytes,3,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
 }

 func (m *GetEntryAttributesResponse) Reset() { *m = GetEntryAttributesResponse{} }
@@ -418,6 +427,13 @@ func (m *GetEntryAttributesResponse) GetChunks() []*FileChunk {
     return nil
 }

+func (m *GetEntryAttributesResponse) GetExtended() map[string][]byte {
+    if m != nil {
+        return m.Extended
+    }
+    return nil
+}
+
 type GetFileContentRequest struct {
     FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"`
 }
@@ -1122,80 +1138,82 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
 func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }

 var fileDescriptor0 = []byte{
-    // 1187 bytes of a gzipped FileDescriptorProto
+    // 1218 bytes of a gzipped FileDescriptorProto
     [... regenerated gzipped descriptor bytes omitted ...]
 }

weed/s3api/filer_multipart.go (123 lines changed)

@@ -1,11 +1,18 @@
 package s3api

 import (
-    "github.com/aws/aws-sdk-go/service/s3"
+    "fmt"
+    "path/filepath"
+    "strconv"
+    "strings"
+    "time"
+
     "github.com/aws/aws-sdk-go/aws"
-    "github.com/satori/go.uuid"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "github.com/chrislusf/seaweedfs/weed/filer2"
     "github.com/chrislusf/seaweedfs/weed/glog"
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/satori/go.uuid"
 )

 func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *s3.CreateMultipartUploadOutput, code ErrorCode) {
@@ -14,9 +21,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
     if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
         if entry.Extended == nil {
-            entry.Extended = make(map[string]string)
+            entry.Extended = make(map[string][]byte)
         }
-        entry.Extended["key"] = *input.Key
+        entry.Extended["key"] = []byte(*input.Key)
     }); err != nil {
         glog.Errorf("NewMultipartUpload error: %v", err)
         return nil, ErrInternalError
@@ -32,19 +39,76 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
 }

 func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *s3.CompleteMultipartUploadOutput, code ErrorCode) {
+
+    uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
+
+    entries, err := s3a.list(uploadDirectory, "", "", false, 0)
+    if err != nil {
+        glog.Errorf("completeMultipartUpload %s *s error: %v", *input.Bucket, *input.UploadId, err)
+        return nil, ErrNoSuchUpload
+    }
+
+    var finalParts []*filer_pb.FileChunk
+    var offset int64
+    for _, entry := range entries {
+        if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
+            for _, chunk := range entry.Chunks {
+                finalParts = append(finalParts, &filer_pb.FileChunk{
+                    FileId: chunk.FileId,
+                    Offset: offset,
+                    Size:   chunk.Size,
+                    Mtime:  chunk.Mtime,
+                    ETag:   chunk.ETag,
+                })
+                offset += int64(chunk.Size)
+            }
+        }
+    }
+
+    entryName := filepath.Base(*input.Key)
+    dirName := filepath.Dir(*input.Key)
+    if dirName == "." {
+        dirName = ""
+    }
+    dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
+
+    err = s3a.mkFile(dirName, entryName, finalParts)
+    if err != nil {
+        glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
+        return nil, ErrInternalError
+    }
+
+    output = &s3.CompleteMultipartUploadOutput{
+        Bucket: input.Bucket,
+        ETag:   aws.String("\"" + filer2.ETag(finalParts) + "\""),
+        Key:    input.Key,
+    }
+
     return
 }

 func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
-    return
-}
-
-func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *s3.ListMultipartUploadsOutput, code ErrorCode) {
-    entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket))
+
+    exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
     if err != nil {
-        glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
+        glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
         return nil, ErrNoSuchUpload
     }
+    if exists {
+        err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true)
+    }
+    if err != nil {
+        glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
+        return nil, ErrInternalError
+    }
+
+    return &s3.AbortMultipartUploadOutput{}, ErrNone
+}
+
+func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *s3.ListMultipartUploadsOutput, code ErrorCode) {

     output = &s3.ListMultipartUploadsOutput{
         Bucket:    input.Bucket,
         Delimiter: input.Delimiter,
@@ -53,11 +117,18 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
         MaxUploads: input.MaxUploads,
         Prefix:     input.Prefix,
     }

+    entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads))
+    if err != nil {
+        glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
+        return
+    }
+
     for _, entry := range entries {
         if entry.Extended != nil {
             key := entry.Extended["key"]
             output.Uploads = append(output.Uploads, &s3.MultipartUpload{
-                Key:      aws.String(key),
+                Key:      aws.String(string(key)),
                 UploadId: aws.String(entry.Name),
             })
         }
@@ -66,5 +137,37 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
 }

 func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *s3.ListPartsOutput, code ErrorCode) {
+    output = &s3.ListPartsOutput{
+        Bucket:           input.Bucket,
+        Key:              input.Key,
+        UploadId:         input.UploadId,
+        MaxParts:         input.MaxParts,         // the maximum number of parts to return.
+        PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive
+    }
+
+    entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId,
+        "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts))
+    if err != nil {
+        glog.Errorf("listObjectParts %s *s error: %v", *input.Bucket, *input.UploadId, err)
+        return nil, ErrNoSuchUpload
+    }
+
+    for _, entry := range entries {
+        if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory {
+            partNumberString := entry.Name[:len(entry.Name)-len(".part")]
+            partNumber, err := strconv.Atoi(partNumberString)
+            if err != nil {
+                glog.Errorf("listObjectParts %s *s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
+                continue
+            }
+            output.Parts = append(output.Parts, &s3.Part{
+                PartNumber:   aws.Int64(int64(partNumber)),
+                LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
+                Size:         aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
+                ETag:         aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
+            })
+        }
+    }
+
     return
 }

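completeMultipartUpload stitches the uploaded .part entries back into one file by re-basing every chunk onto a running offset before handing the list to mkFile. A small sketch of that assembly step with stand-in types rather than the real filer_pb structs:

package main

import "fmt"

type fileChunk struct {
    FileId string
    Offset int64
    Size   uint64
    ETag   string
}

// assembleParts lays each part's chunks end to end, so the final entry
// covers [0, totalSize) regardless of the offsets inside each part.
func assembleParts(parts [][]fileChunk) (final []fileChunk) {
    var offset int64
    for _, part := range parts {
        for _, c := range part {
            final = append(final, fileChunk{
                FileId: c.FileId,
                Offset: offset,
                Size:   c.Size,
                ETag:   c.ETag,
            })
            offset += int64(c.Size)
        }
    }
    return
}

func main() {
    final := assembleParts([][]fileChunk{
        {{FileId: "3,01", Size: 5 << 20, ETag: "aaaa"}},
        {{FileId: "3,02", Size: 3 << 20, ETag: "bbbb"}},
    })
    for _, c := range final {
        fmt.Printf("%s at offset %d, size %d\n", c.FileId, c.Offset, c.Size)
    }
}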
weed/s3api/filer_util.go (44 lines changed)

@@ -34,7 +34,7 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun
             Entry:     entry,
         }

-        glog.V(1).Infof("create bucket: %v", request)
+        glog.V(1).Infof("mkdir: %v", request)
         if _, err := client.CreateEntry(context.Background(), request); err != nil {
             return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
         }
@@ -43,12 +43,46 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun
     })
 }

-func (s3a *S3ApiServer) list(parentDirectoryPath string) (entries []*filer_pb.Entry, err error) {
+func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error {
+
+    return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+
+        entry := &filer_pb.Entry{
+            Name:        fileName,
+            IsDirectory: false,
+            Attributes: &filer_pb.FuseAttributes{
+                Mtime:    time.Now().Unix(),
+                Crtime:   time.Now().Unix(),
+                FileMode: uint32(0770),
+                Uid:      OS_UID,
+                Gid:      OS_GID,
+            },
+            Chunks: chunks,
+        }
+
+        request := &filer_pb.CreateEntryRequest{
+            Directory: parentDirectoryPath,
+            Entry:     entry,
+        }
+
+        glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+        if _, err := client.CreateEntry(context.Background(), request); err != nil {
+            return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
+        }
+
+        return nil
+    })
+}
+
+func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) {

     err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {

         request := &filer_pb.ListEntriesRequest{
-            Directory: s3a.option.BucketsPath,
+            Directory:          s3a.option.BucketsPath,
+            Prefix:             prefix,
+            StartFromFileName:  startFrom,
+            InclusiveStartFrom: inclusive,
+            Limit:              uint32(limit),
         }

         glog.V(4).Infof("read directory: %v", request)
@@ -101,10 +135,10 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD
             Name:      entryName,
         }

-        glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request)
+        glog.V(1).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
         resp, err := client.LookupDirectoryEntry(ctx, request)
         if err != nil {
-            return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err)
+            return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
         }

         exists = resp.Entry.IsDirectory == isDirectory

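The list helper now forwards a prefix, a pagination cursor and a limit to the filer's ListEntries RPC. A rough usage sketch of that call against a filer gRPC endpoint; the address, directory and the unary ListEntries response shape are assumptions based on the filer.proto of this version:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// listWithPrefix issues one paginated listing call, mirroring the fields
// the new s3api list helper fills in.
func listWithPrefix(filerGrpcAddress, dir, prefix, startFrom string, inclusive bool, limit uint32) ([]*filer_pb.Entry, error) {
    conn, err := grpc.Dial(filerGrpcAddress, grpc.WithInsecure())
    if err != nil {
        return nil, err
    }
    defer conn.Close()

    client := filer_pb.NewSeaweedFilerClient(conn)
    resp, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{
        Directory:          dir,
        Prefix:             prefix,
        StartFromFileName:  startFrom,
        InclusiveStartFrom: inclusive,
        Limit:              limit,
    })
    if err != nil {
        return nil, err
    }
    return resp.Entries, nil
}

func main() {
    entries, err := listWithPrefix("localhost:18888", "/buckets/mybucket", "photos/", "", true, 100)
    fmt.Println(len(entries), err)
}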
weed/s3api/s3api_bucket_handlers.go (2 lines changed)

@@ -21,7 +21,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
     var response ListAllMyBucketsResponse

-    entries, err := s3a.list(s3a.option.BucketsPath)
+    entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, 0)

     if err != nil {
         writeErrorResponse(w, ErrInternalError, r.URL)

weed/s3api/s3api_object_multipart_handlers.go (14 lines changed)

@@ -1,16 +1,16 @@
 package s3api

 import (
-    "net/http"
-    "github.com/gorilla/mux"
-    "encoding/xml"
     "fmt"
-    "github.com/aws/aws-sdk-go/service/s3"
     "github.com/aws/aws-sdk-go/aws"
-    "net/url"
-    "strconv"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "github.com/gorilla/mux"
     "io/ioutil"
+    "encoding/xml"
+    "net/http"
+    "net/url"
     "sort"
+    "strconv"
     "strings"
 )
@@ -143,6 +143,8 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
         return
     }

+    // TODO handle encodingType
+
     writeSuccessResponseXML(w, encodeResponse(response))
 }

weed/s3api/s3api_objects_list_handlers.go (2 lines changed)

@@ -126,7 +126,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
             contents = append(contents, &s3.Object{
                 Key:          aws.String(fmt.Sprintf("%s%s", dir, entry.Name)),
                 LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
-                ETag:         aws.String("\"2345sgfwetrewrt\""), // TODO add etag
+                ETag:         aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
                 Size:         aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
                 Owner: &s3.Owner{
                     ID: aws.String("bcaf161ca5fb16fd081034f"),

weed/s3api/s3api_server.go (2 lines changed)

@@ -37,7 +37,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
     apiRouter := router.PathPrefix("/").Subrouter()
     var routers []*mux.Router
     if s3a.option.DomainName != "" {
-        routers = append(routers, apiRouter.Host("{bucket:.+}."+ s3a.option.DomainName).Subrouter())
+        routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter())
     }
     routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())

weed/server/filer_server_handlers_read.go (1 line changed)

@@ -119,6 +119,7 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque
     if mimeType != "" {
         w.Header().Set("Content-Type", mimeType)
     }
+    setEtag(w, filer2.ETag(entry.Chunks))

     totalSize := int64(filer2.TotalSize(entry.Chunks))

weed/server/filer_server_handlers_write.go (2 lines changed)

@@ -150,6 +150,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
         return
     }
     defer resp.Body.Close()
+    etag := resp.Header.Get("ETag")
     resp_body, ra_err := ioutil.ReadAll(resp.Body)
     if ra_err != nil {
         glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, ra_err.Error())
@@ -202,6 +203,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
             FileId: fileId,
             Size:   uint64(ret.Size),
             Mtime:  time.Now().UnixNano(),
+            ETag:   etag,
         }},
     }
     if db_err := fs.filer.CreateEntry(entry); db_err != nil {

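With volume servers reporting an ETag on every upload, the filer's PostHandler keeps resp.Header.Get("ETag") and stores it in the new FileChunk.ETag field. A minimal sketch of that propagation against a hypothetical volume server URL; a plain POST stands in for the real multipart form upload:

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

// uploadAndGetETag posts data and returns the ETag response header,
// the same header the filer now copies into FileChunk.ETag.
// Volume servers send the value quoted, e.g. "1a2b3c4d".
func uploadAndGetETag(url string, data []byte) (string, error) {
    resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(data))
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()
    return resp.Header.Get("ETag"), nil
}

func main() {
    etag, err := uploadAndGetETag("http://127.0.0.1:8080/3,01637037d6", []byte("hello"))
    fmt.Println(etag, err)
}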
weed/server/volume_server_handlers_read.go (7 lines changed)

@@ -68,7 +68,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
     count, e := vs.store.ReadVolumeNeedle(volumeId, n)
     glog.V(4).Infoln("read bytes", count, "error", e)
     if e != nil || count < 0 {
-        glog.V(0).Infoln("read error:", e, r.URL.Path)
+        glog.V(0).Infof("read %s error: %v", r.URL.Path, e)
         w.WriteHeader(http.StatusNotFound)
         return
     }
@@ -88,12 +88,11 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
             }
         }
     }
-    etag := n.Etag()
-    if inm := r.Header.Get("If-None-Match"); inm == etag {
+    if inm := r.Header.Get("If-None-Match"); inm == "\""+n.Etag()+"\"" {
         w.WriteHeader(http.StatusNotModified)
         return
     }
-    w.Header().Set("Etag", etag)
+    setEtag(w, n.Etag())

     if n.HasPairs() {
         pairMap := make(map[string]string)

weed/server/volume_server_handlers_write.go (9 lines changed)

@@ -44,8 +44,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
         ret.Name = string(needle.Name)
     }
     ret.Size = size
-    etag := needle.Etag()
-    w.Header().Set("Etag", etag)
+    setEtag(w, needle.Etag())
     writeJsonQuiet(w, r, httpStatus, ret)
 }
@@ -173,3 +172,9 @@ func (vs *VolumeServer) batchDeleteHandler(w http.ResponseWriter, r *http.Reques
     writeJsonQuiet(w, r, http.StatusAccepted, ret)
 }
+
+func setEtag(w http.ResponseWriter, etag string) {
+    if etag != "" {
+        w.Header().Set("ETag", "\""+etag+"\"")
+    }
+}

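On the volume server, ETag quoting now lives in one place: Needle.Etag() returns the bare hex digest (next file below) and the new setEtag helper adds the quotes, while If-None-Match is compared against the quoted form. A self-contained handler sketch of that behavior; the etag value here is hypothetical, standing in for needle.Etag():

package main

import (
    "fmt"
    "log"
    "net/http"
)

// setEtag matches the helper added in volume_server_handlers_write.go:
// quote the value and skip the header when the etag is empty.
func setEtag(w http.ResponseWriter, etag string) {
    if etag != "" {
        w.Header().Set("ETag", "\""+etag+"\"")
    }
}

func readHandler(w http.ResponseWriter, r *http.Request) {
    etag := "1a2b3c4d" // hypothetical; the real value comes from needle.Etag()
    if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
        w.WriteHeader(http.StatusNotModified)
        return
    }
    setEtag(w, etag)
    fmt.Fprint(w, "needle bytes would go here")
}

func main() {
    http.HandleFunc("/", readHandler)
    log.Fatal(http.ListenAndServe(":8080", nil))
}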
weed/storage/crc.go (2 lines changed)

@@ -26,5 +26,5 @@ func (c CRC) Value() uint32 {
 func (n *Needle) Etag() string {
     bits := make([]byte, 4)
     util.Uint32toBytes(bits, uint32(n.Checksum))
-    return fmt.Sprintf("\"%x\"", bits)
+    return fmt.Sprintf("%x", bits)
 }

weed/storage/volume_loading.go (11 lines changed)

@@ -19,8 +19,9 @@ func loadVolumeWithoutIndex(dirname string, collection string, id VolumeId, need
 func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind NeedleMapType, preallocate int64) error {
     var e error
     fileName := v.FileName()
+    alreadyHasSuperBlock := false

-    if exists, canRead, canWrite, modifiedTime := checkFile(fileName + ".dat"); exists {
+    if exists, canRead, canWrite, modifiedTime, fileSize := checkFile(fileName + ".dat"); exists {
         if !canRead {
             return fmt.Errorf("cannot read Volume Data file %s.dat", fileName)
         }
@@ -32,6 +33,9 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
             v.dataFile, e = os.Open(fileName + ".dat")
             v.readOnly = true
         }
+        if fileSize >= _SuperBlockSize {
+            alreadyHasSuperBlock = true
+        }
     } else {
         if createDatIfMissing {
             v.dataFile, e = createVolumeFile(fileName+".dat", preallocate)
@@ -48,7 +52,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
         }
     }

-    if v.ReplicaPlacement == nil {
+    if alreadyHasSuperBlock {
         e = v.readSuperBlock()
     } else {
         e = v.maybeWriteSuperBlock()
@@ -97,7 +101,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
     return e
 }

-func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Time) {
+func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Time, fileSize int64) {
     exists = true
     fi, err := os.Stat(filename)
     if os.IsNotExist(err) {
@@ -111,5 +115,6 @@ func checkFile(filename string) (exists, canRead, canWrite bool, modTime time.Ti
         canWrite = true
     }
     modTime = fi.ModTime()
+    fileSize = fi.Size()
     return
 }

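Super block detection no longer relies on v.ReplicaPlacement being nil; the loader checks whether the .dat file is at least _SuperBlockSize bytes long, using the file size that checkFile now returns. A standalone sketch of that check; the 8-byte size used here is an assumption standing in for _SuperBlockSize:

package main

import (
    "fmt"
    "os"
)

const superBlockSize = 8 // assumed stand-in for _SuperBlockSize

// hasSuperBlock reports whether an existing .dat file is large enough
// to already contain a super block, following the new size-based check.
func hasSuperBlock(datFileName string) (bool, error) {
    fi, err := os.Stat(datFileName)
    if os.IsNotExist(err) {
        return false, nil
    }
    if err != nil {
        return false, err
    }
    return fi.Size() >= superBlockSize, nil
}

func main() {
    ok, err := hasSuperBlock("/tmp/1.dat")
    fmt.Println(ok, err)
}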
weed/util/constants.go (2 lines changed)

@@ -1,5 +1,5 @@
 package util

 const (
-    VERSION = "0.95"
+    VERSION = "0.96"
 )