Lapshin Vitaliy
3 years ago
20 changed files with 1653 additions and 90 deletions
-
2go.mod
-
5go.sum
-
4weed/command/command.go
-
382weed/command/update.go
-
9weed/command/update_full.go
-
7weed/filer/filechunk_manifest.go
-
8weed/filer/filechunks.go
-
3weed/filer/s3iam_conf.go
-
10weed/pb/s3.proto
-
221weed/pb/s3_pb/s3.pb.go
-
33weed/s3api/auth_credentials_subscribe.go
-
18weed/s3api/s3_constants/s3_config.go
-
182weed/s3api/s3api_circuit_breaker.go
-
97weed/s3api/s3api_circuit_breaker_test.go
-
13weed/s3api/s3api_object_multipart_handlers.go
-
84weed/s3api/s3api_server.go
-
13weed/s3api/s3err/s3api_errors.go
-
358weed/shell/command_s3_circuitbreaker.go
-
292weed/shell/command_s3_circuitbreaker_test.go
-
2weed/storage/needle_map/compact_map.go
@ -0,0 +1,382 @@ |
|||
package command |
|||
|
|||
import ( |
|||
"archive/tar" |
|||
"archive/zip" |
|||
"bytes" |
|||
"compress/gzip" |
|||
"context" |
|||
"crypto/md5" |
|||
"encoding/hex" |
|||
"encoding/json" |
|||
"fmt" |
|||
"io" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"os" |
|||
"path/filepath" |
|||
"runtime" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/chrislusf/seaweedfs/weed/glog" |
|||
"github.com/chrislusf/seaweedfs/weed/util" |
|||
"golang.org/x/net/context/ctxhttp" |
|||
) |
|||
|
|||
//copied from https://github.com/restic/restic/tree/master/internal/selfupdate
|
|||
|
|||
// Release collects data about a single release on GitHub.
type Release struct {
	Name        string    `json:"name"`         // human-readable release title
	TagName     string    `json:"tag_name"`     // git tag of the release
	Draft       bool      `json:"draft"`        // true for unpublished drafts
	PreRelease  bool      `json:"prerelease"`   // true for pre-releases
	PublishedAt time.Time `json:"published_at"` // publication timestamp
	Assets      []Asset   `json:"assets"`       // files attached to the release

	Version string `json:"-"` // set manually in the code
}
|||
|
|||
// Asset is a file uploaded and attached to a release.
type Asset struct {
	ID   int    `json:"id"`   // GitHub asset id
	Name string `json:"name"` // file name, matched by suffix when downloading
	URL  string `json:"url"`  // API URL used to fetch the asset content
}
|||
|
|||
// githubAPITimeout bounds each GitHub API round trip.
const githubAPITimeout = 30 * time.Second

// githubError is returned by the GitHub API, e.g. for rate-limiting.
type githubError struct {
	Message string
}
|||
|
|||
// isFullVersion reports whether this binary is the "full" build; it is
// flipped to true by an init() in update_full.go when the full-build tags
// are set, so the updater downloads the matching "_full" release asset.
var isFullVersion = false
|||
|
|||
var (
	// updateOpt holds the parsed command-line flags for `weed update`.
	updateOpt UpdateOptions
)

// UpdateOptions are the flags of the update command.
type UpdateOptions struct {
	dir     *string // directory to save the downloaded binary
	name    *string // file name for the downloaded binary
	Version *string // requested version; "0" means latest
}
|||
|
|||
func init() { |
|||
path, _ := os.Executable() |
|||
_, name := filepath.Split(path) |
|||
updateOpt.dir = cmdUpdate.Flag.String("dir", filepath.Dir(path), "directory to save new weed.") |
|||
updateOpt.name = cmdUpdate.Flag.String("name", name, "name of new weed. On windows, name shouldn't be same to the orignial name.") |
|||
updateOpt.Version = cmdUpdate.Flag.String("version", "0", "specific version of weed you want to download. If not specified, get the latest version.") |
|||
cmdUpdate.Run = runUpdate |
|||
} |
|||
|
|||
// cmdUpdate is the `weed update` sub-command definition; its Run field is
// assigned in init() to avoid an initialization cycle.
var cmdUpdate = &Command{
	UsageLine: "update [-dir=/path/to/dir] [-name=name] [-version=x.xx]",
	Short:     "get latest or specific version from https://github.com/chrislusf/seaweedfs",
	Long:      `get latest or specific version from https://github.com/chrislusf/seaweedfs`,
}
|||
|
|||
func runUpdate(cmd *Command, args []string) bool { |
|||
path, _ := os.Executable() |
|||
_, name := filepath.Split(path) |
|||
|
|||
if *updateOpt.dir != "" { |
|||
if err := util.TestFolderWritable(util.ResolvePath(*updateOpt.dir)); err != nil { |
|||
glog.Fatalf("Check Folder(-dir) Writable %s : %s", *updateOpt.dir, err) |
|||
return false |
|||
} |
|||
} else { |
|||
*updateOpt.dir = filepath.Dir(path) |
|||
} |
|||
|
|||
if *updateOpt.name == "" { |
|||
*updateOpt.name = name |
|||
} |
|||
|
|||
target := filepath.Join(*updateOpt.dir, *updateOpt.name) |
|||
|
|||
if runtime.GOOS == "windows" { |
|||
if target == path { |
|||
glog.Fatalf("On windows, name of the new weed shouldn't be same to the orignial name.") |
|||
return false |
|||
} |
|||
} |
|||
|
|||
glog.V(0).Infof("new weed will be saved to %s", target) |
|||
|
|||
_, err := downloadRelease(context.Background(), target, *updateOpt.Version) |
|||
if err != nil { |
|||
glog.Errorf("unable to download weed: %v", err) |
|||
return false |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func downloadRelease(ctx context.Context, target string, ver string) (version string, err error) { |
|||
currentVersion := util.VERSION_NUMBER |
|||
rel, err := GitHubLatestRelease(ctx, ver, "chrislusf", "seaweedfs") |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
if rel.Version == currentVersion { |
|||
if ver == "0" { |
|||
glog.V(0).Infof("weed is up to date") |
|||
} else { |
|||
glog.V(0).Infof("no need to download the same version of weed ") |
|||
} |
|||
return currentVersion, nil |
|||
} |
|||
|
|||
glog.V(0).Infof("download version: %s", rel.Version) |
|||
|
|||
largeDiskSuffix := "" |
|||
if util.VolumeSizeLimitGB == 8000 { |
|||
largeDiskSuffix = "_large_disk" |
|||
} |
|||
|
|||
fullSuffix := "" |
|||
if isFullVersion { |
|||
fullSuffix = "_full" |
|||
} |
|||
|
|||
ext := "tar.gz" |
|||
if runtime.GOOS == "windows" { |
|||
ext = "zip" |
|||
} |
|||
|
|||
suffix := fmt.Sprintf("%s_%s%s%s.%s", runtime.GOOS, runtime.GOARCH, fullSuffix, largeDiskSuffix, ext) |
|||
md5Filename := fmt.Sprintf("%s.md5", suffix) |
|||
_, md5Val, err := getGithubDataFile(ctx, rel.Assets, md5Filename) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
downloadFilename, buf, err := getGithubDataFile(ctx, rel.Assets, suffix) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
|
|||
md5Ctx := md5.New() |
|||
md5Ctx.Write(buf) |
|||
binaryMd5 := md5Ctx.Sum(nil) |
|||
if hex.EncodeToString(binaryMd5) != string(md5Val[0:32]) { |
|||
glog.Errorf("md5:'%s' '%s'", hex.EncodeToString(binaryMd5), string(md5Val[0:32])) |
|||
err = fmt.Errorf("binary md5sum doesn't match") |
|||
return "", err |
|||
} |
|||
|
|||
err = extractToFile(buf, downloadFilename, target) |
|||
if err != nil { |
|||
return "", err |
|||
} else { |
|||
glog.V(0).Infof("successfully updated weed to version %v\n", rel.Version) |
|||
} |
|||
|
|||
return rel.Version, nil |
|||
} |
|||
|
|||
// GitHubLatestRelease uses the GitHub API to get information about the specific
|
|||
// release of a repository.
|
|||
func GitHubLatestRelease(ctx context.Context, ver string, owner, repo string) (Release, error) { |
|||
ctx, cancel := context.WithTimeout(ctx, githubAPITimeout) |
|||
defer cancel() |
|||
|
|||
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases", owner, repo) |
|||
req, err := http.NewRequest(http.MethodGet, url, nil) |
|||
if err != nil { |
|||
return Release{}, err |
|||
} |
|||
|
|||
// pin API version 3
|
|||
req.Header.Set("Accept", "application/vnd.github.v3+json") |
|||
|
|||
res, err := ctxhttp.Do(ctx, http.DefaultClient, req) |
|||
if err != nil { |
|||
return Release{}, err |
|||
} |
|||
|
|||
if res.StatusCode != http.StatusOK { |
|||
content := res.Header.Get("Content-Type") |
|||
if strings.Contains(content, "application/json") { |
|||
// try to decode error message
|
|||
var msg githubError |
|||
jerr := json.NewDecoder(res.Body).Decode(&msg) |
|||
if jerr == nil { |
|||
return Release{}, fmt.Errorf("unexpected status %v (%v) returned, message:\n %v", res.StatusCode, res.Status, msg.Message) |
|||
} |
|||
} |
|||
|
|||
_ = res.Body.Close() |
|||
return Release{}, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) |
|||
} |
|||
|
|||
buf, err := ioutil.ReadAll(res.Body) |
|||
if err != nil { |
|||
_ = res.Body.Close() |
|||
return Release{}, err |
|||
} |
|||
|
|||
err = res.Body.Close() |
|||
if err != nil { |
|||
return Release{}, err |
|||
} |
|||
|
|||
var release Release |
|||
var releaseList []Release |
|||
err = json.Unmarshal(buf, &releaseList) |
|||
if err != nil { |
|||
return Release{}, err |
|||
} |
|||
if ver == "0" { |
|||
release = releaseList[0] |
|||
glog.V(0).Infof("latest version is %v\n", release.TagName) |
|||
} else { |
|||
for _, r := range releaseList { |
|||
if r.TagName == ver { |
|||
release = r |
|||
break |
|||
} |
|||
} |
|||
} |
|||
|
|||
if release.TagName == "" { |
|||
return Release{}, fmt.Errorf("can not find the specific version") |
|||
} |
|||
|
|||
release.Version = release.TagName |
|||
return release, nil |
|||
} |
|||
|
|||
func getGithubData(ctx context.Context, url string) ([]byte, error) { |
|||
req, err := http.NewRequest(http.MethodGet, url, nil) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
// request binary data
|
|||
req.Header.Set("Accept", "application/octet-stream") |
|||
|
|||
res, err := ctxhttp.Do(ctx, http.DefaultClient, req) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
if res.StatusCode != http.StatusOK { |
|||
return nil, fmt.Errorf("unexpected status %v (%v) returned", res.StatusCode, res.Status) |
|||
} |
|||
|
|||
buf, err := ioutil.ReadAll(res.Body) |
|||
if err != nil { |
|||
_ = res.Body.Close() |
|||
return nil, err |
|||
} |
|||
|
|||
err = res.Body.Close() |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return buf, nil |
|||
} |
|||
|
|||
func getGithubDataFile(ctx context.Context, assets []Asset, suffix string) (filename string, data []byte, err error) { |
|||
var url string |
|||
for _, a := range assets { |
|||
if strings.HasSuffix(a.Name, suffix) { |
|||
url = a.URL |
|||
filename = a.Name |
|||
break |
|||
} |
|||
} |
|||
|
|||
if url == "" { |
|||
return "", nil, fmt.Errorf("unable to find file with suffix %v", suffix) |
|||
} |
|||
|
|||
glog.V(0).Infof("download %v\n", filename) |
|||
data, err = getGithubData(ctx, url) |
|||
if err != nil { |
|||
return "", nil, err |
|||
} |
|||
|
|||
return filename, data, nil |
|||
} |
|||
|
|||
func extractToFile(buf []byte, filename, target string) error { |
|||
var rd io.Reader = bytes.NewReader(buf) |
|||
|
|||
switch filepath.Ext(filename) { |
|||
case ".gz": |
|||
gr, err := gzip.NewReader(rd) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer gr.Close() |
|||
trd := tar.NewReader(gr) |
|||
hdr, terr := trd.Next() |
|||
if terr != nil { |
|||
glog.Errorf("uncompress file(%s) failed:%s", hdr.Name, terr) |
|||
return terr |
|||
} |
|||
rd = trd |
|||
case ".zip": |
|||
zrd, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf))) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if len(zrd.File) != 1 { |
|||
return fmt.Errorf("ZIP archive contains more than one file") |
|||
} |
|||
|
|||
file, err := zrd.File[0].Open() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
defer func() { |
|||
_ = file.Close() |
|||
}() |
|||
|
|||
rd = file |
|||
} |
|||
|
|||
// Write everything to a temp file
|
|||
dir := filepath.Dir(target) |
|||
new, err := ioutil.TempFile(dir, "weed") |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
n, err := io.Copy(new, rd) |
|||
if err != nil { |
|||
_ = new.Close() |
|||
_ = os.Remove(new.Name()) |
|||
return err |
|||
} |
|||
if err = new.Sync(); err != nil { |
|||
return err |
|||
} |
|||
if err = new.Close(); err != nil { |
|||
return err |
|||
} |
|||
|
|||
mode := os.FileMode(0755) |
|||
// attempt to find the original mode
|
|||
if fi, err := os.Lstat(target); err == nil { |
|||
mode = fi.Mode() |
|||
} |
|||
|
|||
// Rename the temp file to the final location atomically.
|
|||
if err := os.Rename(new.Name(), target); err != nil { |
|||
return err |
|||
} |
|||
|
|||
glog.V(0).Infof("saved %d bytes in %v\n", n, target) |
|||
return os.Chmod(target, mode) |
|||
} |
@ -0,0 +1,9 @@ |
|||
//go:build elastic && ydb && gocdk && hdfs
// +build elastic,ydb,gocdk,hdfs

package command

// When all the "full" build tags are set, mark this binary as a full build so
// the updater fetches the matching "_full" release asset.
func init() {
	isFullVersion = true
}
@ -0,0 +1,18 @@ |
|||
package s3_constants |
|||
|
|||
import ( |
|||
"strings" |
|||
) |
|||
|
|||
var (
	// CircuitBreakerConfigDir is the filer directory holding the config entry.
	CircuitBreakerConfigDir = "/etc/s3"
	// CircuitBreakerConfigFile is the config entry name inside that directory.
	CircuitBreakerConfigFile = "circuit_breaker.json"
	// AllowedActions lists every action a limit may be configured for.
	AllowedActions = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN}
	// LimitTypeCount limits the number of simultaneous requests.
	LimitTypeCount = "Count"
	// LimitTypeBytes limits total in-flight content size (configured in MB).
	LimitTypeBytes = "MB"
	// Separator joins bucket/action/type parts into one lookup key.
	Separator = ":"
)
|||
|
|||
func Concat(elements ...string) string { |
|||
return strings.Join(elements, Separator) |
|||
} |
@ -0,0 +1,182 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/filer" |
|||
"github.com/chrislusf/seaweedfs/weed/glog" |
|||
"github.com/chrislusf/seaweedfs/weed/pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" |
|||
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" |
|||
"github.com/gorilla/mux" |
|||
"net/http" |
|||
"sync" |
|||
"sync/atomic" |
|||
) |
|||
|
|||
// CircuitBreaker tracks simultaneous-request counters against configured
// limits, globally and per bucket.
type CircuitBreaker struct {
	sync.Mutex // guards lazy insertion into counters
	// Enabled mirrors the global config switch; when false all requests pass.
	Enabled bool
	// counters holds live counters keyed by "bucket:action:type" (global
	// entries omit the bucket prefix); values are updated atomically.
	counters map[string]*int64
	// limitations holds the configured maxima under the same keys.
	limitations map[string]int64
}
|||
|
|||
func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { |
|||
cb := &CircuitBreaker{ |
|||
counters: make(map[string]*int64), |
|||
limitations: make(map[string]int64), |
|||
} |
|||
|
|||
err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { |
|||
content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile) |
|||
if err != nil { |
|||
return fmt.Errorf("read S3 circuit breaker config: %v", err) |
|||
} |
|||
return cb.LoadS3ApiConfigurationFromBytes(content) |
|||
}) |
|||
|
|||
if err != nil { |
|||
glog.Warningf("fail to load config: %v", err) |
|||
} |
|||
|
|||
return cb |
|||
} |
|||
|
|||
func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error { |
|||
cbCfg := &s3_pb.S3CircuitBreakerConfig{} |
|||
if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil { |
|||
glog.Warningf("unmarshal error: %v", err) |
|||
return fmt.Errorf("unmarshal error: %v", err) |
|||
} |
|||
if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// loadCircuitBreakerConfig flattens the protobuf config into the fast lookup
// map used on the request path: keys are "action:type" for global limits and
// "bucket:action:type" for per-bucket limits.
//
// NOTE(review): cb.Enabled and cb.limitations are reassigned here without
// holding cb's mutex while Limit/limit read them concurrently — confirm this
// is safe against the config-subscription reload path.
func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {

	//global
	// The breaker only turns on when the global section is enabled AND has at
	// least one action limit configured.
	globalEnabled := false
	globalOptions := cfg.Global
	limitations := make(map[string]int64)
	if globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 {
		globalEnabled = globalOptions.Enabled
		for action, limit := range globalOptions.Actions {
			limitations[action] = limit
		}
	}
	cb.Enabled = globalEnabled

	//buckets
	// Per-bucket limits are merged into the same map under bucket-prefixed keys.
	for bucket, cbOptions := range cfg.Buckets {
		if cbOptions.Enabled {
			for action, limit := range cbOptions.Actions {
				limitations[s3_constants.Concat(bucket, action)] = limit
			}
		}
	}

	cb.limitations = limitations
	return nil
}
|||
|
|||
// Limit wraps an S3 handler with circuit-breaker admission control for the
// given action. When the breaker is disabled the handler runs untouched;
// otherwise counters are incremented before the handler and rolled back
// (via the deferred undo functions) after it returns.
func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
	return func(w http.ResponseWriter, r *http.Request) {
		if !cb.Enabled {
			f(w, r)
			return
		}

		vars := mux.Vars(r)
		bucket := vars["bucket"]

		rollback, errCode := cb.limit(r, bucket, action)
		// Release every acquired counter when the request finishes, whether
		// it was admitted or rejected part-way through the checks.
		defer func() {
			for _, rf := range rollback {
				rf()
			}
		}()

		if errCode == s3err.ErrNone {
			f(w, r)
			return
		}
		// Rejected: answer with the circuit-breaker error (429 / bytes exceeded).
		s3err.WriteErrorResponse(w, r, errCode)
	}, Action(action)
}
|||
|
|||
func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) { |
|||
|
|||
//bucket simultaneous request count
|
|||
bucketCountRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest) |
|||
if bucketCountRollBack != nil { |
|||
rollback = append(rollback, bucketCountRollBack) |
|||
} |
|||
if errCode != s3err.ErrNone { |
|||
return |
|||
} |
|||
|
|||
//bucket simultaneous request content bytes
|
|||
bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed) |
|||
if bucketContentLengthRollBack != nil { |
|||
rollback = append(rollback, bucketContentLengthRollBack) |
|||
} |
|||
if errCode != s3err.ErrNone { |
|||
return |
|||
} |
|||
|
|||
//global simultaneous request count
|
|||
globalCountRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest) |
|||
if globalCountRollBack != nil { |
|||
rollback = append(rollback, globalCountRollBack) |
|||
} |
|||
if errCode != s3err.ErrNone { |
|||
return |
|||
} |
|||
|
|||
//global simultaneous request content bytes
|
|||
globalContentLengthRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed) |
|||
if globalContentLengthRollBack != nil { |
|||
rollback = append(rollback, globalContentLengthRollBack) |
|||
} |
|||
if errCode != s3err.ErrNone { |
|||
return |
|||
} |
|||
return |
|||
} |
|||
|
|||
func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) { |
|||
key := s3_constants.Concat(bucket, action, limitType) |
|||
e = s3err.ErrNone |
|||
if max, ok := cb.limitations[key]; ok { |
|||
counter, exists := cb.counters[key] |
|||
if !exists { |
|||
cb.Lock() |
|||
counter, exists = cb.counters[key] |
|||
if !exists { |
|||
var newCounter int64 |
|||
counter = &newCounter |
|||
cb.counters[key] = counter |
|||
} |
|||
cb.Unlock() |
|||
} |
|||
current := atomic.LoadInt64(counter) |
|||
if current+inc > max { |
|||
e = errCode |
|||
return |
|||
} else { |
|||
current := atomic.AddInt64(counter, inc) |
|||
f = func() { |
|||
atomic.AddInt64(counter, -inc) |
|||
} |
|||
current = atomic.LoadInt64(counter) |
|||
if current > max { |
|||
e = errCode |
|||
return |
|||
} |
|||
} |
|||
} |
|||
return |
|||
} |
@ -0,0 +1,97 @@ |
|||
package s3api |
|||
|
|||
import ( |
|||
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" |
|||
"github.com/chrislusf/seaweedfs/weed/s3api/s3err" |
|||
"net/http" |
|||
"sync" |
|||
"sync/atomic" |
|||
"testing" |
|||
) |
|||
|
|||
// TestLimitCase describes one concurrent-limit scenario.
type TestLimitCase struct {
	actionName       string // S3 action under test
	limitType        string // LimitTypeCount or LimitTypeBytes
	bucketLimitValue int64  // per-bucket limit value
	globalLimitValue int64  // global limit value

	routineCount int   // number of concurrent requests fired
	reqBytes     int64 // ContentLength of each simulated request

	successCount int64 // expected number of admitted requests
}
|||
|
|||
var (
	bucket = "/test"
	action = s3_constants.ACTION_READ
	// TestLimitCases fields: {action, limit type, bucket limit, global limit,
	// goroutine count, request bytes, expected admitted count}.
	TestLimitCases = []*TestLimitCase{
		{action, s3_constants.LimitTypeCount, 5, 5, 6, 1024, 5},
		{action, s3_constants.LimitTypeCount, 6, 6, 6, 1024, 6},
		{action, s3_constants.LimitTypeCount, 5, 6, 6, 1024, 5},
		{action, s3_constants.LimitTypeBytes, 1024, 1024, 6, 200, 5},
		{action, s3_constants.LimitTypeBytes, 1200, 1200, 6, 200, 6},
		{action, s3_constants.LimitTypeBytes, 11990, 11990, 60, 200, 59},
		{action, s3_constants.LimitTypeBytes, 11790, 11990, 70, 200, 58},
	}
)
|||
|
|||
func TestLimit(t *testing.T) { |
|||
for _, tc := range TestLimitCases { |
|||
circuitBreakerConfig := &s3_pb.S3CircuitBreakerConfig{ |
|||
Global: &s3_pb.S3CircuitBreakerOptions{ |
|||
Enabled: true, |
|||
Actions: map[string]int64{ |
|||
s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue, |
|||
s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue, |
|||
}, |
|||
}, |
|||
Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{ |
|||
bucket: { |
|||
Enabled: true, |
|||
Actions: map[string]int64{ |
|||
s3_constants.Concat(tc.actionName, tc.limitType): tc.bucketLimitValue, |
|||
}, |
|||
}, |
|||
}, |
|||
} |
|||
circuitBreaker := &CircuitBreaker{ |
|||
counters: make(map[string]*int64), |
|||
limitations: make(map[string]int64), |
|||
} |
|||
err := circuitBreaker.loadCircuitBreakerConfig(circuitBreakerConfig) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: tc.reqBytes}) |
|||
if successCount != tc.successCount { |
|||
t.Errorf("successCount not equal, expect=%d, actual=%d", tc.successCount, successCount) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request) int64 { |
|||
var successCounter int64 |
|||
resultCh := make(chan []func(), routineCount) |
|||
var wg sync.WaitGroup |
|||
for i := 0; i < routineCount; i++ { |
|||
wg.Add(1) |
|||
go func() { |
|||
defer wg.Done() |
|||
rollbackFn, errCode := circuitBreaker.limit(r, bucket, action) |
|||
if errCode == s3err.ErrNone { |
|||
atomic.AddInt64(&successCounter, 1) |
|||
} |
|||
resultCh <- rollbackFn |
|||
}() |
|||
} |
|||
wg.Wait() |
|||
close(resultCh) |
|||
for fns := range resultCh { |
|||
for _, fn := range fns { |
|||
fn() |
|||
} |
|||
} |
|||
return successCounter |
|||
} |
@ -0,0 +1,358 @@ |
|||
package shell |
|||
|
|||
import ( |
|||
"bytes" |
|||
"flag" |
|||
"fmt" |
|||
"github.com/chrislusf/seaweedfs/weed/filer" |
|||
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb" |
|||
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" |
|||
"io" |
|||
"strconv" |
|||
"strings" |
|||
|
|||
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" |
|||
) |
|||
|
|||
// LoadConfig is a package-level indirection so tests can stub out the
// filer round-trip performed by loadConfig.
var LoadConfig = loadConfig

// Register the command with the shell.
func init() {
	Commands = append(Commands, &commandS3CircuitBreaker{})
}
|||
|
|||
// commandS3CircuitBreaker configures the S3 API circuit breaker from the shell.
type commandS3CircuitBreaker struct {
}

// Name returns the shell command name.
func (c *commandS3CircuitBreaker) Name() string {
	return "s3.circuitBreaker"
}
|||
|
|||
// Help returns the usage text shown by the shell; the raw string below is
// user-facing output and is kept verbatim.
func (c *commandS3CircuitBreaker) Help() string {
	return `configure and apply s3 circuit breaker options for each bucket

	# examples
	# add circuit breaker config for global
	s3.circuitBreaker -global -type count -actions Read,Write -values 500,200 -apply

	# disable global config
	s3.circuitBreaker -global -disable -apply

	# add circuit breaker config for buckets x,y,z
	s3.circuitBreaker -buckets x,y,z -type count -actions Read,Write -values 200,100 -apply

	# disable circuit breaker config of x
	s3.circuitBreaker -buckets x -disable -apply

	# delete circuit breaker config of x
	s3.circuitBreaker -buckets x -delete -apply

	# clear all circuit breaker config
	s3.circuitBreaker -delete -apply
	`
}
|||
|
|||
func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { |
|||
dir := s3_constants.CircuitBreakerConfigDir |
|||
file := s3_constants.CircuitBreakerConfigFile |
|||
|
|||
s3CircuitBreakerCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) |
|||
buckets := s3CircuitBreakerCommand.String("buckets", "", "the bucket name(s) to configure, eg: -buckets x,y,z") |
|||
global := s3CircuitBreakerCommand.Bool("global", false, "configure global circuit breaker") |
|||
|
|||
actions := s3CircuitBreakerCommand.String("actions", "", "comma separated actions names: Read,Write,List,Tagging,Admin") |
|||
limitType := s3CircuitBreakerCommand.String("type", "", "'Count' or 'MB'; Count represents the number of simultaneous requests, and MB represents the content size of all simultaneous requests") |
|||
values := s3CircuitBreakerCommand.String("values", "", "comma separated values") |
|||
|
|||
disabled := s3CircuitBreakerCommand.Bool("disable", false, "disable global or buckets circuit breaker") |
|||
deleted := s3CircuitBreakerCommand.Bool("delete", false, "delete circuit breaker config") |
|||
|
|||
apply := s3CircuitBreakerCommand.Bool("apply", false, "update and apply current configuration") |
|||
|
|||
if err = s3CircuitBreakerCommand.Parse(args); err != nil { |
|||
return nil |
|||
|
|||
} |
|||
|
|||
var buf bytes.Buffer |
|||
err = LoadConfig(commandEnv, dir, file, &buf) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
cbCfg := &s3_pb.S3CircuitBreakerConfig{ |
|||
Buckets: make(map[string]*s3_pb.S3CircuitBreakerOptions), |
|||
} |
|||
if buf.Len() > 0 { |
|||
if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), cbCfg); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
if *deleted { |
|||
cmdBuckets, cmdActions, _, err := c.initActionsAndValues(buckets, actions, limitType, values, true) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if len(cmdBuckets) <= 0 && !*global { |
|||
if len(cmdActions) > 0 { |
|||
deleteGlobalActions(cbCfg, cmdActions, limitType) |
|||
if cbCfg.Buckets != nil { |
|||
var allBuckets []string |
|||
for bucket := range cbCfg.Buckets { |
|||
allBuckets = append(allBuckets, bucket) |
|||
} |
|||
deleteBucketsActions(allBuckets, cbCfg, cmdActions, limitType) |
|||
} |
|||
} else { |
|||
cbCfg.Global = nil |
|||
cbCfg.Buckets = nil |
|||
} |
|||
} else { |
|||
if len(cmdBuckets) > 0 { |
|||
deleteBucketsActions(cmdBuckets, cbCfg, cmdActions, limitType) |
|||
} |
|||
if *global { |
|||
deleteGlobalActions(cbCfg, cmdActions, nil) |
|||
} |
|||
} |
|||
} else { |
|||
cmdBuckets, cmdActions, cmdValues, err := c.initActionsAndValues(buckets, actions, limitType, values, *disabled) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if len(cmdActions) > 0 && len(*buckets) <= 0 && !*global { |
|||
return fmt.Errorf("one of -global and -buckets must be specified") |
|||
} |
|||
|
|||
if len(*buckets) > 0 { |
|||
for _, bucket := range cmdBuckets { |
|||
var cbOptions *s3_pb.S3CircuitBreakerOptions |
|||
var exists bool |
|||
if cbOptions, exists = cbCfg.Buckets[bucket]; !exists { |
|||
cbOptions = &s3_pb.S3CircuitBreakerOptions{} |
|||
cbCfg.Buckets[bucket] = cbOptions |
|||
} |
|||
cbOptions.Enabled = !*disabled |
|||
|
|||
if len(cmdActions) > 0 { |
|||
err = insertOrUpdateValues(cbOptions, cmdActions, cmdValues, limitType) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
if len(cbOptions.Actions) <= 0 && !cbOptions.Enabled { |
|||
delete(cbCfg.Buckets, bucket) |
|||
} |
|||
} |
|||
} |
|||
|
|||
if *global { |
|||
globalOptions := cbCfg.Global |
|||
if globalOptions == nil { |
|||
globalOptions = &s3_pb.S3CircuitBreakerOptions{Actions: make(map[string]int64, len(cmdActions))} |
|||
cbCfg.Global = globalOptions |
|||
} |
|||
globalOptions.Enabled = !*disabled |
|||
|
|||
if len(cmdActions) > 0 { |
|||
err = insertOrUpdateValues(globalOptions, cmdActions, cmdValues, limitType) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
if len(globalOptions.Actions) <= 0 && !globalOptions.Enabled { |
|||
cbCfg.Global = nil |
|||
} |
|||
} |
|||
} |
|||
|
|||
buf.Reset() |
|||
err = filer.ProtoToText(&buf, cbCfg) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
_, _ = fmt.Fprintf(writer, string(buf.Bytes())) |
|||
_, _ = fmt.Fprintln(writer) |
|||
|
|||
if *apply { |
|||
if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { |
|||
return filer.SaveInsideFiler(client, dir, file, buf.Bytes()) |
|||
}); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
func loadConfig(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error { |
|||
if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error { |
|||
return filer.ReadEntry(commandEnv.MasterClient, client, dir, file, buf) |
|||
}); err != nil && err != filer_pb.ErrNotFound { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func insertOrUpdateValues(cbOptions *s3_pb.S3CircuitBreakerOptions, cmdActions []string, cmdValues []int64, limitType *string) error { |
|||
if len(*limitType) == 0 { |
|||
return fmt.Errorf("type not valid, only 'count' and 'bytes' are allowed") |
|||
} |
|||
|
|||
if cbOptions.Actions == nil { |
|||
cbOptions.Actions = make(map[string]int64, len(cmdActions)) |
|||
} |
|||
|
|||
if len(cmdValues) > 0 { |
|||
for i, action := range cmdActions { |
|||
cbOptions.Actions[s3_constants.Concat(action, *limitType)] = cmdValues[i] |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func deleteBucketsActions(cmdBuckets []string, cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) { |
|||
if cbCfg.Buckets == nil { |
|||
return |
|||
} |
|||
|
|||
if len(cmdActions) == 0 { |
|||
for _, bucket := range cmdBuckets { |
|||
delete(cbCfg.Buckets, bucket) |
|||
} |
|||
} else { |
|||
for _, bucket := range cmdBuckets { |
|||
if cbOption, ok := cbCfg.Buckets[bucket]; ok { |
|||
if len(cmdActions) > 0 && cbOption.Actions != nil { |
|||
for _, action := range cmdActions { |
|||
delete(cbOption.Actions, s3_constants.Concat(action, *limitType)) |
|||
} |
|||
} |
|||
|
|||
if len(cbOption.Actions) == 0 && !cbOption.Enabled { |
|||
delete(cbCfg.Buckets, bucket) |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
if len(cbCfg.Buckets) == 0 { |
|||
cbCfg.Buckets = nil |
|||
} |
|||
} |
|||
|
|||
func deleteGlobalActions(cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) { |
|||
globalOptions := cbCfg.Global |
|||
if globalOptions == nil { |
|||
return |
|||
} |
|||
|
|||
if len(cmdActions) == 0 && globalOptions.Actions != nil { |
|||
globalOptions.Actions = nil |
|||
return |
|||
} else { |
|||
for _, action := range cmdActions { |
|||
delete(globalOptions.Actions, s3_constants.Concat(action, *limitType)) |
|||
} |
|||
} |
|||
|
|||
if len(globalOptions.Actions) == 0 && !globalOptions.Enabled { |
|||
cbCfg.Global = nil |
|||
} |
|||
} |
|||
|
|||
func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitType, values *string, parseValues bool) (cmdBuckets, cmdActions []string, cmdValues []int64, err error) { |
|||
if len(*buckets) > 0 { |
|||
cmdBuckets = strings.Split(*buckets, ",") |
|||
} |
|||
|
|||
if len(*actions) > 0 { |
|||
cmdActions = strings.Split(*actions, ",") |
|||
|
|||
//check action valid
|
|||
for _, action := range cmdActions { |
|||
var found bool |
|||
for _, allowedAction := range s3_constants.AllowedActions { |
|||
if allowedAction == action { |
|||
found = true |
|||
} |
|||
} |
|||
if !found { |
|||
return nil, nil, nil, fmt.Errorf("value(%s) of flag[-action] not valid, allowed actions: %v", *actions, s3_constants.AllowedActions) |
|||
} |
|||
} |
|||
} |
|||
|
|||
if !parseValues { |
|||
if len(cmdActions) < 0 { |
|||
for _, action := range s3_constants.AllowedActions { |
|||
cmdActions = append(cmdActions, action) |
|||
} |
|||
} |
|||
|
|||
if len(*limitType) > 0 { |
|||
switch *limitType { |
|||
case s3_constants.LimitTypeCount: |
|||
elements := strings.Split(*values, ",") |
|||
if len(cmdActions) != len(elements) { |
|||
if len(elements) != 1 || len(elements) == 0 { |
|||
return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-counts] not equal") |
|||
} |
|||
v, err := strconv.Atoi(elements[0]) |
|||
if err != nil { |
|||
return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)") |
|||
} |
|||
for range cmdActions { |
|||
cmdValues = append(cmdValues, int64(v)) |
|||
} |
|||
} else { |
|||
for _, value := range elements { |
|||
v, err := strconv.Atoi(value) |
|||
if err != nil { |
|||
return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)") |
|||
} |
|||
cmdValues = append(cmdValues, int64(v)) |
|||
} |
|||
} |
|||
case s3_constants.LimitTypeBytes: |
|||
elements := strings.Split(*values, ",") |
|||
if len(cmdActions) != len(elements) { |
|||
if len(elements) != 1 || len(elements) == 0 { |
|||
return nil, nil, nil, fmt.Errorf("values count of -actions and -values not equal") |
|||
} |
|||
v, err := parseMBToBytes(elements[0]) |
|||
if err != nil { |
|||
return nil, nil, nil, fmt.Errorf("value of -max must be a legal number(s)") |
|||
} |
|||
for range cmdActions { |
|||
cmdValues = append(cmdValues, v) |
|||
} |
|||
} else { |
|||
for _, value := range elements { |
|||
v, err := parseMBToBytes(value) |
|||
if err != nil { |
|||
return nil, nil, nil, fmt.Errorf("value of -max must be a legal number(s)") |
|||
} |
|||
cmdValues = append(cmdValues, v) |
|||
} |
|||
} |
|||
default: |
|||
return nil, nil, nil, fmt.Errorf("type not valid, only 'count' and 'bytes' are allowed") |
|||
} |
|||
} else { |
|||
*limitType = "" |
|||
} |
|||
} |
|||
return cmdBuckets, cmdActions, cmdValues, nil |
|||
} |
|||
|
|||
// parseMBToBytes converts a decimal megabyte count (e.g. "1024") into a
// byte count. It returns an error if valStr is not a valid integer.
//
// The multiplication is done in int64 so that large values do not overflow
// the platform-sized int on 32-bit builds, and no value is produced when
// parsing fails.
func parseMBToBytes(valStr string) (int64, error) {
	v, err := strconv.Atoi(valStr)
	if err != nil {
		return 0, err
	}
	return int64(v) * 1024 * 1024, nil
}
@ -0,0 +1,292 @@ |
|||
package shell |
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/json" |
|||
"reflect" |
|||
"strings" |
|||
"testing" |
|||
) |
|||
|
|||
// Case is one step of the ordered TestCircuitBreakerShell scenario.
type Case struct {
	// args are the command-line arguments handed to the
	// s3.circuitBreaker shell command for this step.
	args []string
	// result is the circuit breaker configuration (as JSON) expected to
	// be stored after the command has run.
	result string
}
|||
|
|||
var (
	// TestCases is an ordered scenario for TestCircuitBreakerShell: each
	// entry's args are applied on top of the configuration produced by
	// the previous entry, and result is the JSON configuration expected
	// afterwards. The cases are order-dependent and must not be run in
	// isolation or reordered.
	TestCases = []*Case{
		// Add circuit breaker config for global.
		{
			args: strings.Split("-global -type Count -actions Read,Write -values 500,200", " "),
			result: `{
		"global": {
			"enabled": true,
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		}
	}`,
		},

		// Disable global config (the "enabled" flag disappears, limits remain).
		{
			args: strings.Split("-global -disable", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		}
	}`,
		},

		// Add circuit breaker config for buckets x, y, z.
		{
			args: strings.Split("-buckets x,y,z -type Count -actions Read,Write -values 200,100", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"x": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Disable circuit breaker config of bucket x.
		{
			args: strings.Split("-buckets x -disable", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"x": {
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Delete circuit breaker config of bucket x entirely.
		{
			args: strings.Split("-buckets x -delete", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Configure the upload-size (MB) circuit breaker for buckets x, y.
		// 1024 MB is stored as 1073741824 bytes.
		{
			args: strings.Split("-buckets x,y -type MB -actions Write -values 1024", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"x": {
				"enabled": true,
				"actions": {
					"Write:MB": "1073741824"
				}
			},
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100",
					"Write:MB": "1073741824"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Delete the upload-size (MB) circuit breaker config of buckets x, y.
		{
			args: strings.Split("-buckets x,y -type MB -actions Write -delete", " "),
			result: `{
		"global": {
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"x": {
				"enabled": true
			},
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Re-enable the global circuit breaker config (no -disable flag).
		{
			args: strings.Split("-global", " "),
			result: `{
		"global": {
			"enabled": true,
			"actions": {
				"Read:Count": "500",
				"Write:Count": "200"
			}
		},
		"buckets": {
			"x": {
				"enabled": true
			},
			"y": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			},
			"z": {
				"enabled": true,
				"actions": {
					"Read:Count": "200",
					"Write:Count": "100"
				}
			}
		}
	}`,
		},

		// Clear the whole circuit breaker config.
		{
			args: strings.Split("-delete", " "),
			result: `{

	}`,
		},
	}
)
|||
|
|||
func TestCircuitBreakerShell(t *testing.T) { |
|||
var writeBuf bytes.Buffer |
|||
cmd := &commandS3CircuitBreaker{} |
|||
LoadConfig = func(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error { |
|||
_, err := buf.Write(writeBuf.Bytes()) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
writeBuf.Reset() |
|||
return nil |
|||
} |
|||
|
|||
for i, tc := range TestCases { |
|||
err := cmd.Do(tc.args, nil, &writeBuf) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if i != 0 { |
|||
result := writeBuf.String() |
|||
|
|||
actual := make(map[string]interface{}) |
|||
err := json.Unmarshal([]byte(result), &actual) |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
|
|||
expect := make(map[string]interface{}) |
|||
err = json.Unmarshal([]byte(result), &expect) |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
if !reflect.DeepEqual(actual, expect) { |
|||
t.Fatal("result of s3 circuit breaker shell command is unexpect!") |
|||
} |
|||
} |
|||
} |
|||
} |
Write
Preview
Loading…
Cancel
Save
Reference in new issue