hilimd
4 years ago
committed by
GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
75 changed files with 3135 additions and 497 deletions
-
2.github/workflows/go.yml
-
21README.md
-
23docker/entrypoint.sh
-
4go.mod
-
4go.sum
-
2k8s/seaweedfs/Chart.yaml
-
2k8s/seaweedfs/values.yaml
-
2other/java/client/pom.xml
-
2other/java/client/pom.xml.deploy
-
2other/java/client/pom_debug.xml
-
2other/java/hdfs2/dependency-reduced-pom.xml
-
2other/java/hdfs2/pom.xml
-
7other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java
-
2other/java/hdfs3/dependency-reduced-pom.xml
-
2other/java/hdfs3/pom.xml
-
1856other/metrics/grafana_seaweedfs.json
-
82test/s3/basic/object_tagging_test.go
-
9weed/command/benchmark.go
-
24weed/command/filer.go
-
34weed/command/master.go
-
27weed/command/mount_std.go
-
14weed/command/s3.go
-
2weed/command/scaffold.go
-
7weed/command/server.go
-
5weed/command/volume.go
-
36weed/filer/filechunk_manifest.go
-
12weed/filer/filechunks.go
-
2weed/filer/filer_delete_entry.go
-
138weed/filer/reader_at.go
-
48weed/filer/stream.go
-
9weed/filesys/dir.go
-
6weed/filesys/dir_link.go
-
5weed/filesys/wfs.go
-
3weed/operation/submit.go
-
2weed/pb/master.proto
-
238weed/pb/master_pb/master.pb.go
-
40weed/replication/repl_util/replication_utli.go
-
25weed/replication/sink/azuresink/azure_sink.go
-
30weed/replication/sink/b2sink/b2_sink.go
-
22weed/replication/sink/gcssink/gcs_sink.go
-
11weed/replication/sink/s3sink/s3_write.go
-
21weed/replication/source/filer_source.go
-
8weed/s3api/auth_credentials.go
-
104weed/s3api/filer_util_tags.go
-
117weed/s3api/s3api_object_tagging_handlers.go
-
17weed/s3api/s3api_server.go
-
6weed/s3api/s3err/s3api_errors.go
-
27weed/s3api/stats.go
-
38weed/s3api/tags.go
-
50weed/s3api/tags_test.go
-
55weed/server/filer_grpc_server.go
-
33weed/server/filer_server.go
-
1weed/server/filer_server_handlers_write_autochunk.go
-
17weed/server/master_grpc_server.go
-
13weed/server/master_grpc_server_volume.go
-
5weed/server/master_server.go
-
133weed/server/raft_server.go
-
14weed/server/raft_server_handlers.go
-
2weed/server/volume_grpc_client_to_master.go
-
4weed/server/volume_server_handlers_read.go
-
3weed/server/volume_server_handlers_write.go
-
2weed/server/webdav_server.go
-
4weed/stats/metrics.go
-
2weed/storage/backend/volume_create_linux.go
-
3weed/storage/disk_location.go
-
4weed/storage/store.go
-
4weed/storage/store_ec.go
-
3weed/storage/volume_read_write.go
-
32weed/util/chunk_cache/chunk_cache.go
-
51weed/util/chunk_cache/chunk_cache_on_disk_test.go
-
8weed/util/chunk_cache/on_disk_cache_layer.go
-
2weed/util/constants.go
-
2weed/util/fullpath.go
-
30weed/wdclient/masterclient.go
-
51weed/wdclient/vid_map.go
@ -1,4 +1,4 @@ |
|||||
apiVersion: v1 |
apiVersion: v1 |
||||
description: SeaweedFS |
description: SeaweedFS |
||||
name: seaweedfs |
name: seaweedfs |
||||
version: 2.00 |
|
||||
|
version: 2.03 |
1856
other/metrics/grafana_seaweedfs.json
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,82 @@ |
|||||
|
package basic |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/aws/aws-sdk-go/aws" |
||||
|
"github.com/aws/aws-sdk-go/service/s3" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestObjectTagging(t *testing.T) { |
||||
|
|
||||
|
input := &s3.PutObjectInput{ |
||||
|
Bucket: aws.String("theBucket"), |
||||
|
Key: aws.String("testDir/testObject"), |
||||
|
} |
||||
|
|
||||
|
svc.PutObject(input) |
||||
|
|
||||
|
printTags() |
||||
|
|
||||
|
setTags() |
||||
|
|
||||
|
printTags() |
||||
|
|
||||
|
clearTags() |
||||
|
|
||||
|
printTags() |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func printTags() { |
||||
|
response, err := svc.GetObjectTagging( |
||||
|
&s3.GetObjectTaggingInput{ |
||||
|
Bucket: aws.String("theBucket"), |
||||
|
Key: aws.String("testDir/testObject"), |
||||
|
}) |
||||
|
|
||||
|
fmt.Println("printTags") |
||||
|
if err != nil { |
||||
|
fmt.Println(err.Error()) |
||||
|
} |
||||
|
|
||||
|
fmt.Println(response.TagSet) |
||||
|
} |
||||
|
|
||||
|
func setTags() { |
||||
|
|
||||
|
response, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{ |
||||
|
Bucket: aws.String("theBucket"), |
||||
|
Key: aws.String("testDir/testObject"), |
||||
|
Tagging: &s3.Tagging{ |
||||
|
TagSet: []*s3.Tag{ |
||||
|
{ |
||||
|
Key: aws.String("kye2"), |
||||
|
Value: aws.String("value2"), |
||||
|
}, |
||||
|
}, |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
fmt.Println("setTags") |
||||
|
if err != nil { |
||||
|
fmt.Println(err.Error()) |
||||
|
} |
||||
|
|
||||
|
fmt.Println(response.String()) |
||||
|
} |
||||
|
|
||||
|
func clearTags() { |
||||
|
|
||||
|
response, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{ |
||||
|
Bucket: aws.String("theBucket"), |
||||
|
Key: aws.String("testDir/testObject"), |
||||
|
}) |
||||
|
|
||||
|
fmt.Println("clearTags") |
||||
|
if err != nil { |
||||
|
fmt.Println(err.Error()) |
||||
|
} |
||||
|
|
||||
|
fmt.Println(response.String()) |
||||
|
} |
@ -0,0 +1,40 @@ |
|||||
|
package repl_util |
||||
|
|
||||
|
import ( |
||||
|
"github.com/chrislusf/seaweedfs/weed/filer" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/chrislusf/seaweedfs/weed/replication/source" |
||||
|
"github.com/chrislusf/seaweedfs/weed/util" |
||||
|
) |
||||
|
|
||||
|
func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error { |
||||
|
|
||||
|
for _, chunk := range chunkViews { |
||||
|
|
||||
|
fileUrls, err := filerSource.LookupFileId(chunk.FileId) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
var writeErr error |
||||
|
|
||||
|
for _, fileUrl := range fileUrls { |
||||
|
err = util.ReadUrlAsStream(fileUrl+"?readDeleted=true", nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { |
||||
|
writeErr = writeFunc(data) |
||||
|
}) |
||||
|
if err != nil { |
||||
|
glog.V(1).Infof("read from %s: %v", fileUrl, err) |
||||
|
} else if writeErr != nil { |
||||
|
glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr) |
||||
|
} else { |
||||
|
break |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
return nil |
||||
|
} |
@ -0,0 +1,104 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"strings" |
||||
|
|
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
S3TAG_PREFIX = "s3-" |
||||
|
) |
||||
|
|
||||
|
func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) { |
||||
|
|
||||
|
err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ |
||||
|
Directory: parentDirectoryPath, |
||||
|
Name: entryName, |
||||
|
}) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
tags = make(map[string]string) |
||||
|
for k, v := range resp.Entry.Extended { |
||||
|
if strings.HasPrefix(k, S3TAG_PREFIX) { |
||||
|
tags[k[len(S3TAG_PREFIX):]] = string(v) |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
}) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, tags map[string]string) (err error) { |
||||
|
|
||||
|
return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ |
||||
|
Directory: parentDirectoryPath, |
||||
|
Name: entryName, |
||||
|
}) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
for k, _ := range resp.Entry.Extended { |
||||
|
if strings.HasPrefix(k, S3TAG_PREFIX) { |
||||
|
delete(resp.Entry.Extended, k) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if resp.Entry.Extended == nil { |
||||
|
resp.Entry.Extended = make(map[string][]byte) |
||||
|
} |
||||
|
for k, v := range tags { |
||||
|
resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v) |
||||
|
} |
||||
|
|
||||
|
return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ |
||||
|
Directory: parentDirectoryPath, |
||||
|
Entry: resp.Entry, |
||||
|
IsFromOtherCluster: false, |
||||
|
Signatures: nil, |
||||
|
}) |
||||
|
|
||||
|
}) |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (err error) { |
||||
|
|
||||
|
return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { |
||||
|
|
||||
|
resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ |
||||
|
Directory: parentDirectoryPath, |
||||
|
Name: entryName, |
||||
|
}) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
hasDeletion := false |
||||
|
for k, _ := range resp.Entry.Extended { |
||||
|
if strings.HasPrefix(k, S3TAG_PREFIX) { |
||||
|
delete(resp.Entry.Extended, k) |
||||
|
hasDeletion = true |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if !hasDeletion { |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ |
||||
|
Directory: parentDirectoryPath, |
||||
|
Entry: resp.Entry, |
||||
|
IsFromOtherCluster: false, |
||||
|
Signatures: nil, |
||||
|
}) |
||||
|
|
||||
|
}) |
||||
|
|
||||
|
} |
@ -0,0 +1,117 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"encoding/xml" |
||||
|
"fmt" |
||||
|
"github.com/chrislusf/seaweedfs/weed/glog" |
||||
|
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
||||
|
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" |
||||
|
"github.com/chrislusf/seaweedfs/weed/util" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"net/http" |
||||
|
) |
||||
|
|
||||
|
// GetObjectTaggingHandler - GET object tagging
|
||||
|
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
|
||||
|
func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { |
||||
|
|
||||
|
bucket, object := getBucketAndObject(r) |
||||
|
|
||||
|
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) |
||||
|
dir, name := target.DirAndName() |
||||
|
|
||||
|
tags, err := s3a.getTags(dir, name) |
||||
|
if err != nil { |
||||
|
if err == filer_pb.ErrNotFound { |
||||
|
glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) |
||||
|
} else { |
||||
|
glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
writeSuccessResponseXML(w, encodeResponse(FromTags(tags))) |
||||
|
|
||||
|
} |
||||
|
|
||||
|
// PutObjectTaggingHandler Put object tagging
|
||||
|
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
|
||||
|
func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { |
||||
|
|
||||
|
bucket, object := getBucketAndObject(r) |
||||
|
|
||||
|
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) |
||||
|
dir, name := target.DirAndName() |
||||
|
|
||||
|
tagging := &Tagging{} |
||||
|
input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) |
||||
|
if err != nil { |
||||
|
glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL) |
||||
|
return |
||||
|
} |
||||
|
if err = xml.Unmarshal(input, tagging); err != nil { |
||||
|
glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrMalformedXML, r.URL) |
||||
|
return |
||||
|
} |
||||
|
tags := tagging.ToTags() |
||||
|
if len(tags) > 10 { |
||||
|
glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags)) |
||||
|
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) |
||||
|
return |
||||
|
} |
||||
|
for k, v := range tags { |
||||
|
if len(k) > 128 { |
||||
|
glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k) |
||||
|
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) |
||||
|
return |
||||
|
} |
||||
|
if len(v) > 256 { |
||||
|
glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v) |
||||
|
writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil { |
||||
|
if err == filer_pb.ErrNotFound { |
||||
|
glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) |
||||
|
} else { |
||||
|
glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
w.WriteHeader(http.StatusNoContent) |
||||
|
|
||||
|
} |
||||
|
|
||||
|
// DeleteObjectTaggingHandler Delete object tagging
|
||||
|
// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
|
||||
|
func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { |
||||
|
|
||||
|
bucket, object := getBucketAndObject(r) |
||||
|
|
||||
|
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) |
||||
|
dir, name := target.DirAndName() |
||||
|
|
||||
|
err := s3a.rmTags(dir, name) |
||||
|
if err != nil { |
||||
|
if err == filer_pb.ErrNotFound { |
||||
|
glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) |
||||
|
} else { |
||||
|
glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) |
||||
|
writeErrorResponse(w, s3err.ErrInternalError, r.URL) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
w.WriteHeader(http.StatusNoContent) |
||||
|
} |
@ -0,0 +1,38 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"encoding/xml" |
||||
|
) |
||||
|
|
||||
|
// Tag is a single S3 object tag, a key/value pair.
type Tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

// TagSet wraps the list of tags inside an S3 Tagging document.
type TagSet struct {
	Tag []Tag `xml:"Tag"`
}

// Tagging is the root element of the S3 object-tagging XML document,
// as defined by the 2006-03-01 S3 XML namespace.
type Tagging struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"`
	TagSet  TagSet   `xml:"TagSet"`
}

// ToTags flattens the XML tag list into a key -> value map.
func (t *Tagging) ToTags() map[string]string {
	result := make(map[string]string)
	for _, pair := range t.TagSet.Tag {
		result[pair.Key] = pair.Value
	}
	return result
}

// FromTags builds a Tagging document from a key -> value map.
// Note: map iteration order is unspecified, so the resulting tag
// list order is not deterministic.
func FromTags(tags map[string]string) (t *Tagging) {
	t = &Tagging{}
	for key, value := range tags {
		t.TagSet.Tag = append(t.TagSet.Tag, Tag{Key: key, Value: value})
	}
	return
}
@ -0,0 +1,50 @@ |
|||||
|
package s3api |
||||
|
|
||||
|
import ( |
||||
|
"encoding/xml" |
||||
|
"github.com/stretchr/testify/assert" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestXMLUnmarshall(t *testing.T) { |
||||
|
|
||||
|
input := `<?xml version="1.0" encoding="UTF-8"?> |
||||
|
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> |
||||
|
<TagSet> |
||||
|
<Tag> |
||||
|
<Key>key1</Key> |
||||
|
<Value>value1</Value> |
||||
|
</Tag> |
||||
|
</TagSet> |
||||
|
</Tagging> |
||||
|
` |
||||
|
|
||||
|
tags := &Tagging{} |
||||
|
|
||||
|
xml.Unmarshal([]byte(input), tags) |
||||
|
|
||||
|
assert.Equal(t, len(tags.TagSet.Tag), 1) |
||||
|
assert.Equal(t, tags.TagSet.Tag[0].Key, "key1") |
||||
|
assert.Equal(t, tags.TagSet.Tag[0].Value, "value1") |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestXMLMarshall(t *testing.T) { |
||||
|
tags := &Tagging{ |
||||
|
TagSet: TagSet{ |
||||
|
[]Tag{ |
||||
|
{ |
||||
|
Key: "key1", |
||||
|
Value: "value1", |
||||
|
}, |
||||
|
}, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
actual := string(encodeResponse(tags)) |
||||
|
|
||||
|
expected := `<?xml version="1.0" encoding="UTF-8"?> |
||||
|
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>key1</Key><Value>value1</Value></Tag></TagSet></Tagging>` |
||||
|
assert.Equal(t, expected, actual) |
||||
|
|
||||
|
} |
Write
Preview
Loading…
Cancel
Save
Reference in new issue