hilimd committed 3 years ago (committed by GitHub)
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
88 changed files with 2282 additions and 1020 deletions
- 54 .github/workflows/binaries_dev.yml
- 31 docker/compose/local-cluster-compose.yml
- 4 k8s/helm_charts2/Chart.yaml
- 2 other/java/client/pom.xml
- 2 other/java/client/pom.xml.deploy
- 2 other/java/client/pom_debug.xml
- 109 other/java/client/src/main/java/seaweedfs/client/ReadChunks.java
- 90 other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java
- 14 other/java/client/src/main/proto/filer.proto
- 123 other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java
- 4 other/java/examples/pom.xml
- 2 other/java/hdfs2/dependency-reduced-pom.xml
- 2 other/java/hdfs2/pom.xml
- 2 other/java/hdfs3/dependency-reduced-pom.xml
- 2 other/java/hdfs3/pom.xml
- 15 test/s3/basic/basic_test.go
- 5 unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
- 5 unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go
- 12 weed/command/download.go
- 10 weed/command/filer_copy.go
- 7 weed/command/scaffold.go
- 5 weed/filer.toml
- 23 weed/filer/filechunks.go
- 119 weed/filer/filechunks_read.go
- 210 weed/filer/filechunks_read_test.go
- 39 weed/filer/filer_conf.go
- 13 weed/filer/filerstore_wrapper.go
- 7 weed/filer/leveldb/leveldb_store_test.go
- 5 weed/filer/leveldb2/leveldb2_store_test.go
- 5 weed/filer/leveldb3/leveldb3_store_test.go
- 8 weed/filer/rocksdb/rocksdb_store_test.go
- 117 weed/filesys/dir_rename.go
- 4 weed/filesys/meta_cache/meta_cache.go
- 2 weed/filesys/meta_cache/meta_cache_subscribe.go
- 5 weed/images/orientation_test.go
- 5 weed/images/resizing_test.go
- 5 weed/operation/chunked_file.go
- 5 weed/operation/upload_content.go
- 14 weed/pb/filer.proto
- 1333 weed/pb/filer_pb/filer.pb.go
- 10 weed/pb/filer_pb_tail.go
- 12 weed/remote_storage/azure/azure_storage_client.go
- 12 weed/remote_storage/gcs/gcs_storage_client.go
- 6 weed/replication/sub/notification_kafka.go
- 34 weed/s3api/auth_credentials.go
- 9 weed/s3api/auth_credentials_subscribe.go
- 13 weed/s3api/auth_signature_v4.go
- 8 weed/s3api/auto_signature_v4_test.go
- 14 weed/s3api/filer_multipart.go
- 97 weed/s3api/s3api_bucket_handlers.go
- 1 weed/s3api/s3api_handlers.go
- 25 weed/s3api/s3api_object_handlers.go
- 16 weed/s3api/s3api_object_handlers_postpolicy.go
- 9 weed/s3api/s3api_object_multipart_handlers.go
- 8 weed/s3api/s3api_object_skip_handlers.go
- 10 weed/s3api/s3api_object_tagging_handlers.go
- 147 weed/s3api/s3api_policy.go
- 22 weed/s3api/s3api_server.go
- 16 weed/s3api/s3api_xsd_generated.go
- 8 weed/s3api/s3err/s3api_errors.go
- 5 weed/s3api/tags.go
- 1 weed/s3api/tags_test.go
- 16 weed/security/tls.go
- 10 weed/server/common.go
- 89 weed/server/filer_grpc_server_rename.go
- 1 weed/server/filer_grpc_server_sub_meta.go
- 18 weed/server/filer_server_handlers_read.go
- 4 weed/server/filer_server_handlers_write_autochunk.go
- 5 weed/server/filer_server_handlers_write_upload.go
- 7 weed/server/volume_grpc_copy.go
- 9 weed/server/volume_grpc_erasure_coding.go
- 19 weed/shell/command_fs_configure.go
- 9 weed/shell/command_volume_fsck.go
- 31 weed/storage/disk_location.go
- 13 weed/storage/disk_location_ec.go
- 5 weed/storage/needle/needle_parse_upload.go
- 3 weed/storage/needle/needle_read_write_test.go
- 4 weed/storage/needle_map_metric_test.go
- 61 weed/storage/volume_backup.go
- 11 weed/storage/volume_info/volume_info.go
- 5 weed/storage/volume_vacuum_test.go
- 31 weed/storage/volume_write_test.go
- 3 weed/util/chunk_cache/chunk_cache_on_disk_test.go
- 2 weed/util/chunk_cache/on_disk_cache_layer.go
- 2 weed/util/constants.go
- 15 weed/util/http_util.go
- 11 weed/util/log_buffer/log_buffer.go
- 14 weed/util/skiplist/skiplist_test.go
k8s/helm_charts2/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.71"
-version: "2.71"
+appVersion: "2.74"
+version: "2.74"
other/java/client/src/main/java/seaweedfs/client/ReadChunks.java (new file)
@@ -0,0 +1,109 @@
package seaweedfs.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class ReadChunks {

    public static List<SeaweedRead.VisibleInterval> readResolvedChunks(List<FilerProto.FileChunk> chunkList) throws IOException {
        List<Point> points = new ArrayList<>(chunkList.size() * 2);
        for (FilerProto.FileChunk chunk : chunkList) {
            points.add(new Point(chunk.getOffset(), chunk, true));
            points.add(new Point(chunk.getOffset() + chunk.getSize(), chunk, false));
        }
        // sort by offset, then timestamp; end points sort before start points
        Collections.sort(points, new Comparator<Point>() {
            @Override
            public int compare(Point a, Point b) {
                if (a.x != b.x) {
                    return Long.compare(a.x, b.x);
                }
                if (a.ts != b.ts) {
                    return Long.compare(a.ts, b.ts);
                }
                return a.isStart ? 1 : -1;
            }
        });

        long prevX = 0;
        List<SeaweedRead.VisibleInterval> visibles = new ArrayList<>();
        ArrayList<Point> queue = new ArrayList<>();
        for (Point point : points) {
            if (point.isStart) {
                if (queue.size() > 0) {
                    int lastIndex = queue.size() - 1;
                    Point lastPoint = queue.get(lastIndex);
                    if (point.x != prevX && lastPoint.ts < point.ts) {
                        addToVisibles(visibles, prevX, lastPoint, point);
                        prevX = point.x;
                    }
                }
                // insert into the queue, keeping it ordered by timestamp
                for (int i = queue.size(); i >= 0; i--) {
                    if (i == 0 || queue.get(i - 1).ts <= point.ts) {
                        if (i == queue.size()) {
                            prevX = point.x;
                        }
                        queue.add(i, point);
                        break;
                    }
                }
            } else {
                int lastIndex = queue.size() - 1;
                int index = lastIndex;
                Point startPoint = null;
                for (; index >= 0; index--) {
                    startPoint = queue.get(index);
                    if (startPoint.ts == point.ts) {
                        queue.remove(index);
                        break;
                    }
                }
                if (index == lastIndex && startPoint != null) {
                    addToVisibles(visibles, prevX, startPoint, point);
                    prevX = point.x;
                }
            }
        }

        return visibles;
    }

    private static void addToVisibles(List<SeaweedRead.VisibleInterval> visibles, long prevX, Point startPoint, Point point) {
        if (prevX < point.x) {
            FilerProto.FileChunk chunk = startPoint.chunk;
            visibles.add(new SeaweedRead.VisibleInterval(
                    prevX,
                    point.x,
                    chunk.getFileId(),
                    chunk.getMtime(),
                    prevX - chunk.getOffset(),
                    chunk.getOffset() == prevX && chunk.getSize() == prevX - startPoint.x,
                    chunk.getCipherKey().toByteArray(),
                    chunk.getIsCompressed()
            ));
        }
    }

    static class Point {
        long x;        // offset of this start/end point
        long ts;       // modification timestamp of the owning chunk
        FilerProto.FileChunk chunk;
        boolean isStart;

        public Point(long x, FilerProto.FileChunk chunk, boolean isStart) {
            this.x = x;
            this.ts = chunk.getMtime();
            this.chunk = chunk;
            this.isStart = isStart;
        }
    }

}
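This Java client-side resolver mirrors the Go implementation added below in weed/filer/filechunks_read.go: both sweep over chunk start/end points sorted by offset, keeping a timestamp-ordered queue so that, at any byte offset, the most recently written chunk is the one that stays visible.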
weed/filer.toml
@@ -1,5 +0,0 @@
-[redis3]
-enabled = true
-address = "localhost:6379"
-password = ""
-database = 0
weed/filer/filechunks_read.go (new file)
@@ -0,0 +1,119 @@
package filer

import (
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"sort"
)

// readResolvedChunks sweeps over the chunks' start/end points and returns
// the intervals that remain visible after later writes overwrite earlier ones.
func readResolvedChunks(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) {

	var points []*Point
	for _, chunk := range chunks {
		points = append(points, &Point{
			x:       chunk.Offset,
			ts:      chunk.Mtime,
			chunk:   chunk,
			isStart: true,
		})
		points = append(points, &Point{
			x:       chunk.Offset + int64(chunk.Size),
			ts:      chunk.Mtime,
			chunk:   chunk,
			isStart: false,
		})
	}
	// sort by offset, then timestamp; end points sort before start points
	sort.Slice(points, func(i, j int) bool {
		if points[i].x != points[j].x {
			return points[i].x < points[j].x
		}
		if points[i].ts != points[j].ts {
			return points[i].ts < points[j].ts
		}
		return !points[i].isStart
	})

	var prevX int64
	var queue []*Point
	for _, point := range points {
		if point.isStart {
			if len(queue) > 0 {
				lastIndex := len(queue) - 1
				lastPoint := queue[lastIndex]
				if point.x != prevX && lastPoint.ts < point.ts {
					visibles = addToVisibles(visibles, prevX, lastPoint, point)
					prevX = point.x
				}
			}
			// insert into the queue, keeping it ordered by timestamp
			for i := len(queue); i >= 0; i-- {
				if i == 0 || queue[i-1].ts <= point.ts {
					if i == len(queue) {
						prevX = point.x
					}
					queue = addToQueue(queue, i, point)
					break
				}
			}
		} else {
			lastIndex := len(queue) - 1
			index := lastIndex
			var startPoint *Point
			for ; index >= 0; index-- {
				startPoint = queue[index]
				if startPoint.ts == point.ts {
					queue = removeFromQueue(queue, index)
					break
				}
			}
			if index == lastIndex && startPoint != nil {
				visibles = addToVisibles(visibles, prevX, startPoint, point)
				prevX = point.x
			}
		}
	}

	return
}

func removeFromQueue(queue []*Point, index int) []*Point {
	for i := index; i < len(queue)-1; i++ {
		queue[i] = queue[i+1]
	}
	queue = queue[:len(queue)-1]
	return queue
}

func addToQueue(queue []*Point, index int, point *Point) []*Point {
	queue = append(queue, point)
	for i := len(queue) - 1; i > index; i-- {
		queue[i], queue[i-1] = queue[i-1], queue[i]
	}
	return queue
}

func addToVisibles(visibles []VisibleInterval, prevX int64, startPoint *Point, point *Point) []VisibleInterval {
	if prevX < point.x {
		chunk := startPoint.chunk
		visibles = append(visibles, VisibleInterval{
			start:        prevX,
			stop:         point.x,
			fileId:       chunk.GetFileIdString(),
			modifiedTime: chunk.Mtime,
			chunkOffset:  prevX - chunk.Offset,
			chunkSize:    chunk.Size,
			cipherKey:    chunk.CipherKey,
			isGzipped:    chunk.IsCompressed,
		})
	}
	return visibles
}

type Point struct {
	x       int64
	ts      int64
	chunk   *filer_pb.FileChunk
	isStart bool
}
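A minimal in-package sketch (hypothetical, not part of this commit) of how the resolver behaves when a newer chunk partially overwrites an older one: chunk "b", written at Mtime 2, hides the overlapping half of chunk "a":

package filer

import (
	"fmt"
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// TestOverlapSketch is a hypothetical illustration: chunk "b" (Mtime 2)
// overwrites [50,150), so only [0,50) of chunk "a" (Mtime 1) stays visible.
func TestOverlapSketch(t *testing.T) {
	visibles := readResolvedChunks([]*filer_pb.FileChunk{
		{FileId: "a", Offset: 0, Size: 100, Mtime: 1},
		{FileId: "b", Offset: 50, Size: 100, Mtime: 2},
	})
	for _, v := range visibles {
		fmt.Printf("[%d,%d) ts=%d\n", v.start, v.stop, v.modifiedTime)
	}
	// expected output: [0,50) ts=1 followed by [50,150) ts=2
}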
weed/filer/filechunks_read_test.go (new file)
@@ -0,0 +1,210 @@
package filer

import (
	"fmt"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"math/rand"
	"testing"
)

func TestReadResolvedChunks(t *testing.T) {

	chunks := []*filer_pb.FileChunk{
		{
			FileId: "a",
			Offset: 0,
			Size:   100,
			Mtime:  1,
		},
		{
			FileId: "b",
			Offset: 50,
			Size:   100,
			Mtime:  2,
		},
		{
			FileId: "c",
			Offset: 200,
			Size:   50,
			Mtime:  3,
		},
		{
			FileId: "d",
			Offset: 250,
			Size:   50,
			Mtime:  4,
		},
		{
			FileId: "e",
			Offset: 175,
			Size:   100,
			Mtime:  5,
		},
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
	}

}

func TestRandomizedReadResolvedChunks(t *testing.T) {

	var limit int64 = 1024 * 1024
	// array records, for every byte offset, the timestamp of the last write:
	// a brute-force oracle to check the sweep against
	array := make([]int64, limit)
	var chunks []*filer_pb.FileChunk
	for ts := int64(0); ts < 1024; ts++ {
		x := rand.Int63n(limit)
		y := rand.Int63n(limit)
		size := x - y
		if size < 0 {
			size = -size
		}
		if size > 1024 {
			size = 1024
		}
		start := x
		if start > y {
			start = y
		}
		chunks = append(chunks, randomWrite(array, start, size, ts))
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		for i := visible.start; i < visible.stop; i++ {
			if array[i] != visible.modifiedTime {
				t.Errorf("position %d expected ts %d actual ts %d", i, array[i], visible.modifiedTime)
			}
		}
	}

	// fmt.Printf("visibles %d", len(visibles))

}

func randomWrite(array []int64, start int64, size int64, ts int64) *filer_pb.FileChunk {
	for i := start; i < start+size; i++ {
		array[i] = ts
	}
	// fmt.Printf("write [%d,%d) %d\n", start, start+size, ts)
	return &filer_pb.FileChunk{
		FileId: "",
		Offset: start,
		Size:   uint64(size),
		Mtime:  ts,
	}
}

func TestSequentialReadResolvedChunks(t *testing.T) {

	var chunkSize int64 = 1024 * 1024 * 2
	var chunks []*filer_pb.FileChunk
	for ts := int64(0); ts < 13; ts++ {
		chunks = append(chunks, &filer_pb.FileChunk{
			FileId: "",
			Offset: chunkSize * ts,
			Size:   uint64(chunkSize),
			Mtime:  1,
		})
	}

	visibles := readResolvedChunks(chunks)

	fmt.Printf("visibles %d", len(visibles))

}

func TestActualReadResolvedChunks(t *testing.T) {

	chunks := []*filer_pb.FileChunk{
		{
			FileId: "5,e7b96fef48",
			Offset: 0,
			Size:   2097152,
			Mtime:  1634447487595823000,
		},
		{
			FileId: "5,e5562640b9",
			Offset: 2097152,
			Size:   2097152,
			Mtime:  1634447487595826000,
		},
		{
			FileId: "5,df033e0fe4",
			Offset: 4194304,
			Size:   2097152,
			Mtime:  1634447487595827000,
		},
		{
			FileId: "7,eb08148a9b",
			Offset: 6291456,
			Size:   2097152,
			Mtime:  1634447487595827000,
		},
		{
			FileId: "7,e0f92d1604",
			Offset: 8388608,
			Size:   2097152,
			Mtime:  1634447487595828000,
		},
		{
			FileId: "7,e33cb63262",
			Offset: 10485760,
			Size:   2097152,
			Mtime:  1634447487595828000,
		},
		{
			FileId: "5,ea98e40e93",
			Offset: 12582912,
			Size:   2097152,
			Mtime:  1634447487595829000,
		},
		{
			FileId: "5,e165661172",
			Offset: 14680064,
			Size:   2097152,
			Mtime:  1634447487595829000,
		},
		{
			FileId: "3,e692097486",
			Offset: 16777216,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "3,e28e2e3cbd",
			Offset: 18874368,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "3,e443974d4e",
			Offset: 20971520,
			Size:   2097152,
			Mtime:  1634447487595830000,
		},
		{
			FileId: "2,e815bed597",
			Offset: 23068672,
			Size:   2097152,
			Mtime:  1634447487595831000,
		},
		{
			FileId: "5,e94715199e",
			Offset: 25165824,
			Size:   1974736,
			Mtime:  1634447487595832000,
		},
	}

	visibles := readResolvedChunks(chunks)

	for _, visible := range visibles {
		fmt.Printf("[%d,%d) %s %d\n", visible.start, visible.stop, visible.fileId, visible.modifiedTime)
	}

}
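These tests can be run directly with the standard toolchain, e.g. go test -run ReadResolvedChunks ./weed/filer/ (a usage note, not part of the diff).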
weed/pb/filer_pb/filer.pb.go (1333 changes)
File diff suppressed because it is too large. View file.
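(filer.pb.go is generated by protoc from weed/pb/filer.proto, which this commit also changes by 14 lines, so the large regenerated diff is expected.)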
weed/s3api/s3api_policy.go (new file)
@@ -0,0 +1,147 @@
package s3api

import (
	"encoding/xml"
	"time"
)

// ruleStatus represents lifecycle configuration status
type ruleStatus string

// Supported status types
const (
	Enabled  ruleStatus = "Enabled"
	Disabled ruleStatus = "Disabled"
)

// Lifecycle - configuration for bucket lifecycle.
type Lifecycle struct {
	XMLName xml.Name `xml:"LifecycleConfiguration"`
	Rules   []Rule   `xml:"Rule"`
}

// Rule - a rule for lifecycle configuration.
type Rule struct {
	XMLName    xml.Name   `xml:"Rule"`
	ID         string     `xml:"ID,omitempty"`
	Status     ruleStatus `xml:"Status"`
	Filter     Filter     `xml:"Filter,omitempty"`
	Prefix     Prefix     `xml:"Prefix,omitempty"`
	Expiration Expiration `xml:"Expiration,omitempty"`
	Transition Transition `xml:"Transition,omitempty"`
}

// Filter - a filter for a lifecycle configuration Rule.
type Filter struct {
	XMLName xml.Name `xml:"Filter"`
	set     bool

	Prefix Prefix

	And    And
	andSet bool

	Tag    Tag
	tagSet bool
}

// Prefix holds the prefix xml tag in <Rule> and <Filter>
type Prefix struct {
	string
	set bool
}

// MarshalXML encodes the prefix if it is set.
func (p Prefix) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if !p.set {
		return nil
	}
	return e.EncodeElement(p.string, startElement)
}

// MarshalXML encodes only the Prefix field of the filter.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
		return err
	}
	return e.EncodeToken(xml.EndElement{Name: start.Name})
}

// And - a tag to combine a prefix and multiple tags for a lifecycle configuration rule.
type And struct {
	XMLName xml.Name `xml:"And"`
	Prefix  Prefix   `xml:"Prefix,omitempty"`
	Tags    []Tag    `xml:"Tag,omitempty"`
}

// Expiration - expiration actions for a rule in lifecycle configuration.
type Expiration struct {
	XMLName      xml.Name           `xml:"Expiration"`
	Days         int                `xml:"Days,omitempty"`
	Date         ExpirationDate     `xml:"Date,omitempty"`
	DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker"`

	set bool
}

// MarshalXML encodes the expiration field into an XML form.
func (e Expiration) MarshalXML(enc *xml.Encoder, startElement xml.StartElement) error {
	if !e.set {
		return nil
	}
	type expirationWrapper Expiration
	return enc.EncodeElement(expirationWrapper(e), startElement)
}

// ExpireDeleteMarker represents the value of the ExpiredObjectDeleteMarker field in the Expiration XML element.
type ExpireDeleteMarker struct {
	val bool
	set bool
}

// MarshalXML encodes the delete marker boolean into an XML form.
func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if !b.set {
		return nil
	}
	return e.EncodeElement(b.val, startElement)
}

// ExpirationDate is an embedded type containing time.Time to unmarshal
// Date in Expiration
type ExpirationDate struct {
	time.Time
}

// MarshalXML encodes the expiration date if it is non-zero and encodes
// an empty string otherwise
func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if eDate.Time.IsZero() {
		return nil
	}
	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
}

// Transition - transition actions for a rule in lifecycle configuration.
type Transition struct {
	XMLName      xml.Name  `xml:"Transition"`
	Days         int       `xml:"Days,omitempty"`
	Date         time.Time `xml:"Date,omitempty"`
	StorageClass string    `xml:"StorageClass,omitempty"`

	set bool
}

// MarshalXML encodes the transition field into an XML form.
func (t Transition) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	if !t.set {
		return nil
	}
	type transitionWrapper Transition
	return enc.EncodeElement(transitionWrapper(t), start)
}

// TransitionDays is a type alias to unmarshal Days in Transition
type TransitionDays int
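A minimal in-package sketch (hypothetical, not part of this commit) showing how the custom MarshalXML methods suppress unset sub-elements when a lifecycle configuration is serialized; the set flags are unexported, so this only works from inside the s3api package:

package s3api

import (
	"encoding/xml"
	"fmt"
)

// marshalLifecycleSketch is a hypothetical helper: one Enabled rule with a
// prefix filter and a 7-day expiration. Unset Transition/Date/DeleteMarker
// fields produce no XML output because their MarshalXML methods return early.
func marshalLifecycleSketch() {
	lc := Lifecycle{
		Rules: []Rule{{
			ID:         "expire-tmp",
			Status:     Enabled,
			Filter:     Filter{Prefix: Prefix{string: "tmp/", set: true}},
			Expiration: Expiration{Days: 7, set: true},
		}},
	}
	out, err := xml.MarshalIndent(lc, "", "  ")
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Println(string(out))
}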