@@ -121,7 +121,7 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol
 	return resp, nil
 }
 
-func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) {
+func (fs *FilerServer) lookupFileId(ctx context.Context, fileId string) (targetUrls []string, err error) {
 	fid, err := needle.ParseFileIdFromString(fileId)
 	if err != nil {
 		return nil, err
@@ -142,12 +142,12 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
 
 	resp = &filer_pb.CreateEntryResponse{}
 
-	chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry)
+	chunks, garbage, err2 := fs.cleanupChunks(ctx, util.Join(req.Directory, req.Entry.Name), nil, req.Entry)
 	if err2 != nil {
 		return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2)
 	}
 
-	so, err := fs.detectStorageOption(string(util.NewFullPath(req.Directory, req.Entry.Name)), "", "", 0, "", "", "", "")
+	so, err := fs.detectStorageOption(ctx, string(util.NewFullPath(req.Directory, req.Entry.Name)), "", "", 0, "", "", "", "")
 	if err != nil {
 		return nil, err
 	}
@@ -177,7 +177,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err)
 	}
 
-	chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry)
+	chunks, garbage, err2 := fs.cleanupChunks(ctx, fullpath, entry, req.Entry)
 	if err2 != nil {
 		return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2)
 	}
@@ -201,11 +201,11 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr
 	return &filer_pb.UpdateEntryResponse{}, err
 }
 
-func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
+func (fs *FilerServer) cleanupChunks(ctx context.Context, fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) {
 
 	// remove old chunks if not included in the new ones
 	if existingEntry != nil {
-		garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
+		garbage, err = filer.MinusChunks(ctx, fs.lookupFileId, existingEntry.GetChunks(), newEntry.GetChunks())
 		if err != nil {
 			return newEntry.GetChunks(), nil, fmt.Errorf("MinusChunks: %v", err)
 		}
@@ -214,11 +214,11 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
 	// files with manifest chunks are usually large and append only, skip calculating covered chunks
 	manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.GetChunks())
 
-	chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks)
+	chunks, coveredChunks := filer.CompactFileChunks(ctx, fs.lookupFileId, nonManifestChunks)
 	garbage = append(garbage, coveredChunks...)
 
 	if newEntry.Attributes != nil {
-		so, _ := fs.detectStorageOption(fullpath,
+		so, _ := fs.detectStorageOption(ctx, fullpath,
 			"",
 			"",
 			newEntry.Attributes.TtlSec,
@@ -227,7 +227,7 @@ func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry
 			"",
 			"",
 		) // ignore readonly error for capacity needed to manifestize
-		chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks)
+		chunks, err = filer.MaybeManifestize(fs.saveAsChunk(ctx, so), chunks)
 		if err != nil {
 			// not good, but should be ok
 			glog.V(0).Infof("MaybeManifestize: %v", err)
@@ -271,12 +271,12 @@ func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendTo
 	}
 
 	entry.Chunks = append(entry.GetChunks(), req.Chunks...)
-	so, err := fs.detectStorageOption(string(fullpath), "", "", entry.TtlSec, "", "", "", "")
+	so, err := fs.detectStorageOption(ctx, string(fullpath), "", "", entry.TtlSec, "", "", "", "")
 	if err != nil {
 		glog.Warningf("detectStorageOption: %v", err)
 		return &filer_pb.AppendToEntryResponse{}, err
 	}
-	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.GetChunks())
+	entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(ctx, so), entry.GetChunks())
 	if err != nil {
 		// not good, but should be ok
 		glog.V(0).Infof("MaybeManifestize: %v", err)
@@ -305,7 +305,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
 		req.DiskType = fs.option.DiskType
 	}
 
-	so, err := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack, req.DataNode)
+	so, err := fs.detectStorageOption(ctx, req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack, req.DataNode)
 	if err != nil {
 		glog.V(3).Infof("AssignVolume: %v", err)
 		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
@@ -313,7 +313,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
 
 	assignRequest, altRequest := so.ToAssignRequests(int(req.Count))
 
-	assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
+	assignResult, err := operation.Assign(ctx, fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
 	if err != nil {
 		glog.V(3).Infof("AssignVolume: %v", err)
 		return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil
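
Note: every hunk above is the same mechanical edit — the gRPC handler's ctx is threaded down into lookupFileId, cleanupChunks, detectStorageOption, filer.MinusChunks, filer.CompactFileChunks, saveAsChunk, and operation.Assign, so downstream lookups and volume assignments can observe the caller's cancellation and deadline. The Go sketch below only illustrates that pattern with hypothetical names (lookupFunc, compact); it is not the SeaweedFS API, just a minimal, self-contained example of passing a context through a lookup callback.

// Hypothetical sketch of the ctx-threading pattern; names are illustrative.
package main

import (
	"context"
	"fmt"
	"time"
)

// lookupFunc mirrors the new lookupFileId shape: ctx first, then the file id.
type lookupFunc func(ctx context.Context, fileId string) (targetUrls []string, err error)

// compact stands in for a helper that now forwards ctx to every lookup it performs.
func compact(ctx context.Context, lookup lookupFunc, fileIds []string) error {
	for _, id := range fileIds {
		if _, err := lookup(ctx, id); err != nil {
			return fmt.Errorf("lookup %s: %w", id, err)
		}
	}
	return nil
}

func main() {
	// A deadline set by the request handler now bounds every downstream lookup.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	lookup := func(ctx context.Context, fileId string) ([]string, error) {
		select {
		case <-ctx.Done():
			return nil, ctx.Err() // respect cancellation and deadlines
		default:
			return []string{"http://volume-server/" + fileId}, nil
		}
	}

	fmt.Println(compact(ctx, lookup, []string{"3,01637037d6"}))
}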