
Merge pull request #3 from chrislusf/master

pull/1823/head
bingoohuang authored 4 years ago, committed by GitHub
commit f69356f589
20 changed files (changed lines per file):

  22  .github/workflows/cleanup.yml
  82  .github/workflows/release.yml
   4  docker/Dockerfile
   4  docker/compose/local-mount-compose.yml
   4  weed/command/filer.go
   2  weed/command/server.go
   6  weed/command/webdav.go
  11  weed/filer.toml
  16  weed/filer/filer_conf.go
   2  weed/filer/filer_on_meta_event.go
   1  weed/pb/grpc_client_server.go
   2  weed/server/master_grpc_server_volume.go
   2  weed/server/master_server_handlers.go
   5  weed/server/webdav_server.go
   4  weed/shell/command_volume_tier_move.go
   2  weed/shell/shell_liner.go
   6  weed/storage/super_block/replica_placement.go
  21  weed/topology/data_node.go
  19  weed/topology/disk.go
  22  weed/topology/volume_growth.go

22  .github/workflows/cleanup.yml

@@ -0,0 +1,22 @@
+name: Cleanup
+
+on:
+  push:
+    branches: [ master ]
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Delete old release assets
+        uses: mknejp/delete-release-assets@v1
+        with:
+          token: ${{ github.token }}
+          tag: dev
+          fail-if-no-assets: false
+          assets: |
+            weed-*

82  .github/workflows/release.yml

@@ -21,46 +21,42 @@ jobs:
     steps:
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - name: Delete old release assets
-        uses: mknejp/delete-release-assets@v1
-        with:
-          token: ${{ github.token }}
-          tag: dev
-          fail-if-no-assets: false
-          assets: |
-            weed-*
-      - name: Set BUILD_TIME env
-        run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
-      - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.14
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          build_flags: -tags 5BytesOffset # optional, default is
-          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-large-disk
-          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
-      - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.14
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          goos: ${{ matrix.goos }}
-          goarch: ${{ matrix.goarch }}
-          release_tag: dev
-          overwrite: true
-          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
-          # Where to run `go build .`
-          project_path: weed
-          binary_name: weed-
-          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
+      - name: Wait for the deletion
+        uses: jakejarvis/wait-action@master
+        with:
+          time: '30s'
+      - name: Set BUILD_TIME env
+        run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
+      - name: Go Release Binaries
+        uses: wangyoucao577/go-release-action@v1.14
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          build_flags: -tags 5BytesOffset # optional, default is
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-large-disk
+          asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
+      - name: Go Release Binaries
+        uses: wangyoucao577/go-release-action@v1.14
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          goos: ${{ matrix.goos }}
+          goarch: ${{ matrix.goarch }}
+          release_tag: dev
+          overwrite: true
+          ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
+          # Where to run `go build .`
+          project_path: weed
+          binary_name: weed-
+          asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"

4  docker/Dockerfile

@@ -1,5 +1,7 @@
 FROM alpine
+
+ARG RELEASE=latest # 'latest' or 'dev'
 RUN \
   ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
   elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \
@@ -13,7 +15,7 @@ RUN \
   # Install SeaweedFS and Supercronic ( for cron job mode )
   apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \
   apk add fuse && \
-  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
+  wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \
   tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \
   curl -fsSLO "$SUPERCRONIC_URL" && \
   echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \

4  docker/compose/local-mount-compose.yml

@@ -30,7 +30,7 @@ services:
   mount_1:
     image: chrislusf/seaweedfs:local
     privileged: true
-    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1" -volumeServerAccess=filerProxy'
+    entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"'
     depends_on:
       - master
       - volume
@@ -38,7 +38,7 @@ services:
   mount_2:
     image: chrislusf/seaweedfs:local
     privileged: true
-    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1" -volumeServerAcess=publicUrl'
+    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
     depends_on:
       - master
       - volume

4  weed/command/filer.go

@@ -52,7 +52,7 @@ type FilerOptions struct {
 func init() {
 	cmdFiler.Run = runFiler // break init cycle
 	f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers")
-	f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection")
+	f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection")
 	f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address")
 	f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to")
 	f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port")
@@ -83,6 +83,8 @@ func init() {
 	filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway")
 	filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port")
 	filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files")
+	filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files")
+	filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd] hard drive or solid state drive")
 	filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file")
 	filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
 	filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")

2  weed/command/server.go

@@ -121,6 +121,8 @@ func init() {
 	webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port")
 	webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files")
+	webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files")
+	webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd] hard drive or solid state drive")
 	webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file")
 	webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
 	webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")

6  weed/command/webdav.go

@@ -25,6 +25,8 @@ type WebDavOption struct {
 	filer          *string
 	port           *int
 	collection     *string
+	replication    *string
+	disk           *string
 	tlsPrivateKey  *string
 	tlsCertificate *string
 	cacheDir       *string
@@ -36,6 +38,8 @@ func init() {
 	webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address")
 	webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port")
 	webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
+	webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files")
+	webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd] hard drive or solid state drive")
 	webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
 	webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
 	webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
@@ -107,6 +111,8 @@ func (wo *WebDavOption) startWebDav() bool {
 		FilerGrpcAddress: filerGrpcAddress,
 		GrpcDialOption:   grpcDialOption,
 		Collection:       *wo.collection,
+		Replication:      *wo.replication,
+		DiskType:         *wo.disk,
 		Uid:              uid,
 		Gid:              gid,
 		Cipher:           cipher,

11  weed/filer.toml

@@ -1,11 +0,0 @@
-[elastic7]
-enabled = true
-servers = [
-    "http://localhost:9200",
-]
-username = ""
-password = ""
-sniff_enabled = false
-healthcheck_enabled = false
-# increase the value is recommend, be sure the value in Elastic is greater or equal here
-index.max_result_window = 10000

16  weed/filer/filer_conf.go

@@ -46,17 +46,19 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 		return fc.LoadFromBytes(entry.Content)
 	}
-	return fc.loadFromChunks(filer, entry.Chunks)
+	return fc.loadFromChunks(filer, entry.Content, entry.Chunks)
 }

-func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
-	data, err := filer.readEntry(chunks)
-	if err != nil {
-		glog.Errorf("read filer conf content: %v", err)
-		return
+func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) {
+	if len(content) == 0 {
+		content, err = filer.readEntry(chunks)
+		if err != nil {
+			glog.Errorf("read filer conf content: %v", err)
+			return
+		}
 	}
-	return fc.LoadFromBytes(data)
+	return fc.LoadFromBytes(content)
 }

 func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
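The reworked loader prefers the inline entry.Content (small configuration files can be embedded directly in the entry) and falls back to assembling chunks only when nothing is stored inline. A minimal standalone sketch of the same pattern, with hypothetical readChunks and parse helpers standing in for filer.readEntry and LoadFromBytes:

	// Sketch only: prefer inline content, fall back to chunked storage.
	func load(content []byte, chunks []*filer_pb.FileChunk) error {
		if len(content) == 0 { // nothing stored inline
			var err error
			if content, err = readChunks(chunks); err != nil { // hypothetical helper
				return err
			}
		}
		return parse(content) // hypothetical helper
	}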

2  weed/filer/filer_on_meta_event.go

@@ -40,7 +40,7 @@ func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
 func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
 	fc := NewFilerConf()
-	err := fc.loadFromChunks(f, entry.Chunks)
+	err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
 	if err != nil {
 		glog.Errorf("read filer conf chunks: %v", err)
 		return

1  weed/pb/grpc_client_server.go

@@ -64,7 +64,6 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
 		grpc.WithDefaultCallOptions(
 			grpc.MaxCallSendMsgSize(Max_Message_Size),
 			grpc.MaxCallRecvMsgSize(Max_Message_Size),
-			grpc.WaitForReady(true),
 		),
 		grpc.WithKeepaliveParams(keepalive.ClientParameters{
 			Time: 30 * time.Second, // client ping server if no activity for this long
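Dropping grpc.WaitForReady(true) from the default call options changes the failure mode: an RPC issued while the connection is not ready now fails fast instead of blocking until the channel recovers. A caller that still wants the old blocking behavior can opt in per call; a minimal sketch with a hypothetical generated client and request:

	// Opt back in to wait-for-ready semantics for this single RPC.
	resp, err := client.SomeMethod(ctx, req, grpc.WaitForReady(true))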

2  weed/server/master_grpc_server_volume.go

@@ -77,7 +77,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest) (*master_pb.AssignResponse, error) {
 	if !ms.Topo.HasWritableVolume(option) {
 		if ms.Topo.AvailableSpaceFor(option) <= 0 {
-			return nil, fmt.Errorf("No free volumes left!")
+			return nil, fmt.Errorf("no free volumes left for "+option.String())
 		}
 		ms.vgLock.Lock()
 		if !ms.Topo.HasWritableVolume(option) {

2  weed/server/master_server_handlers.go

@@ -113,7 +113,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) {
 	if !ms.Topo.HasWritableVolume(option) {
 		if ms.Topo.AvailableSpaceFor(option) <= 0 {
-			writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left!"})
+			writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()})
 			return
 		}
 		ms.vgLock.Lock()
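Both paths now report which assignment option (collection, replication, disk type, placement) ran out of space. One aside on the gRPC variant above: it passes a concatenated string as the fmt.Errorf format argument, so any % character inside option.String() would be misread as a format verb; the conventional spelling would be:

	return nil, fmt.Errorf("no free volumes left for %s", option.String())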

5  weed/server/webdav_server.go

@@ -33,6 +33,7 @@ type WebDavOption struct {
 	BucketsPath    string
 	GrpcDialOption grpc.DialOption
 	Collection     string
+	Replication    string
 	DiskType       string
 	Uid            uint32
 	Gid            uint32
@@ -225,7 +226,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) {
 				Uid:         fs.option.Uid,
 				Gid:         fs.option.Gid,
 				Collection:  fs.option.Collection,
-				Replication: "000",
+				Replication: fs.option.Replication,
 				TtlSec:      0,
 			},
 		},
@@ -381,7 +382,7 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
 	request := &filer_pb.AssignVolumeRequest{
 		Count:       1,
-		Replication: "",
+		Replication: f.fs.option.Replication,
 		Collection:  f.fs.option.Collection,
 		DiskType:    f.fs.option.DiskType,
 		Path:        name,
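With Replication and DiskType threaded from the command-line flags into both the OpenFile attributes and volume assignment, the WebDAV gateway no longer hardcodes "000". An illustrative standalone invocation (flag values are examples only):

	weed webdav -port=7333 -collection=docs -replication=001 -disk=ssd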

4  weed/shell/command_volume_tier_move.go

@@ -20,11 +20,11 @@ type commandVolumeTierMove struct {
 }

 func (c *commandVolumeTierMove) Name() string {
-	return "volume.tier.upload"
+	return "volume.tier.move"
 }

 func (c *commandVolumeTierMove) Help() string {
-	return `change a volume from one disk type to another
+	return `<WIP> change a volume from one disk type to another

 	volume.tier.move -source=hdd -target=ssd [-collection=""] [-fullPercent=95] [-quietFor=1h]
 	volume.tier.move -target=hdd [-collection=""] -volumeId=<volume_id>

2  weed/shell/shell_liner.go

@@ -92,7 +92,7 @@ func processEachCmd(reg *regexp.Regexp, cmd string, commandEnv *CommandEnv) bool {
 func printGenericHelp() {
 	msg :=
-		`Type: "help <command>" for help on <command>
+		`Type: "help <command>" for help on <command>. Most commands support "<command> -h" also for options.
 `
 	fmt.Print(msg)

6  weed/storage/super_block/replica_placement.go

@@ -6,9 +6,9 @@ import (
 )

 type ReplicaPlacement struct {
-	SameRackCount       int
-	DiffRackCount       int
-	DiffDataCenterCount int
+	SameRackCount       int `json:"node,omitempty"`
+	DiffRackCount       int `json:"rack,omitempty"`
+	DiffDataCenterCount int `json:"dc,omitempty"`
 }

 func NewReplicaPlacementFromString(t string) (*ReplicaPlacement, error) {

21  weed/topology/data_node.go

@@ -207,7 +207,26 @@ func (dn *DataNode) ToMap() interface{} {
 	ret := make(map[string]interface{})
 	ret["Url"] = dn.Url()
 	ret["PublicUrl"] = dn.PublicUrl
-	ret["Disks"] = dn.diskUsages.ToMap()
+
+	// aggregated volume info
+	var volumeCount, ecShardCount, maxVolumeCount int64
+	var volumeIds string
+	for _, diskUsage := range dn.diskUsages.usages {
+		volumeCount += diskUsage.volumeCount
+		ecShardCount += diskUsage.ecShardCount
+		maxVolumeCount += diskUsage.maxVolumeCount
+	}
+
+	for _, disk := range dn.Children() {
+		d := disk.(*Disk)
+		volumeIds += " " + d.GetVolumeIds()
+	}
+
+	ret["Volumes"] = volumeCount
+	ret["EcShards"] = ecShardCount
+	ret["Max"] = maxVolumeCount
+	ret["VolumeIds"] = volumeIds
+
 	return ret
 }

19  weed/topology/disk.go

@@ -58,16 +58,6 @@ func (d *DiskUsages) negative() *DiskUsages {
 	return t
 }

-func (d *DiskUsages) ToMap() interface{} {
-	d.RLock()
-	defer d.RUnlock()
-	ret := make(map[string]interface{})
-	for diskType, diskUsage := range d.usages {
-		ret[diskType.String()] = diskUsage.ToMap()
-	}
-	return ret
-}
-
 func (d *DiskUsages) ToDiskInfo() map[string]*master_pb.DiskInfo {
 	ret := make(map[string]*master_pb.DiskInfo)
 	for diskType, diskUsageCounts := range d.usages {
@@ -135,15 +125,6 @@ func (a *DiskUsageCounts) minus(b *DiskUsageCounts) *DiskUsageCounts {
 	}
 }

-func (diskUsage *DiskUsageCounts) ToMap() interface{} {
-	ret := make(map[string]interface{})
-	ret["Volumes"] = diskUsage.volumeCount
-	ret["EcShards"] = diskUsage.ecShardCount
-	ret["Max"] = diskUsage.maxVolumeCount
-	ret["Free"] = diskUsage.FreeSpace()
-	return ret
-}
-
 func (du *DiskUsages) getOrCreateDisk(diskType types.DiskType) *DiskUsageCounts {
 	du.Lock()
 	defer du.Unlock()

22  weed/topology/volume_growth.go

@@ -1,6 +1,7 @@
 package topology

 import (
+	"encoding/json"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
@@ -25,15 +26,15 @@ This package is created to resolve these replica placement issues:
 */

 type VolumeGrowOption struct {
-	Collection         string
-	ReplicaPlacement   *super_block.ReplicaPlacement
-	Ttl                *needle.TTL
-	DiskType           types.DiskType
-	Prealloacte        int64
-	DataCenter         string
-	Rack               string
-	DataNode           string
-	MemoryMapMaxSizeMb uint32
+	Collection         string                        `json:"collection,omitempty"`
+	ReplicaPlacement   *super_block.ReplicaPlacement `json:"replication,omitempty"`
+	Ttl                *needle.TTL                   `json:"ttl,omitempty"`
+	DiskType           types.DiskType                `json:"disk,omitempty"`
+	Prealloacte        int64                         `json:"prealloacte,omitempty"`
+	DataCenter         string                        `json:"dataCenter,omitempty"`
+	Rack               string                        `json:"rack,omitempty"`
+	DataNode           string                        `json:"dataNode,omitempty"`
+	MemoryMapMaxSizeMb uint32                        `json:"memoryMapMaxSizeMb,omitempty"`
 }

 type VolumeGrowth struct {
@@ -41,7 +42,8 @@ type VolumeGrowth struct {
 }

 func (o *VolumeGrowOption) String() string {
-	return fmt.Sprintf("Collection:%s, ReplicaPlacement:%v, Ttl:%v, DataCenter:%s, Rack:%s, DataNode:%s", o.Collection, o.ReplicaPlacement, o.Ttl, o.DataCenter, o.Rack, o.DataNode)
+	blob, _ := json.Marshal(o)
+	return string(blob)
 }

 func NewDefaultVolumeGrowth() *VolumeGrowth {
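With omitempty tags on both structs, String() now renders only the fields that are actually set, which keeps messages like "no free volumes left for ..." short. A standalone sketch of the effect (simplified stand-ins for the SeaweedFS types, not the types themselves):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Simplified copies of super_block.ReplicaPlacement and VolumeGrowOption.
	type replicaPlacement struct {
		SameRackCount       int `json:"node,omitempty"`
		DiffRackCount       int `json:"rack,omitempty"`
		DiffDataCenterCount int `json:"dc,omitempty"`
	}

	type volumeGrowOption struct {
		Collection       string            `json:"collection,omitempty"`
		ReplicaPlacement *replicaPlacement `json:"replication,omitempty"`
		DataCenter       string            `json:"dataCenter,omitempty"`
	}

	func main() {
		o := volumeGrowOption{
			Collection:       "pics",
			ReplicaPlacement: &replicaPlacement{DiffRackCount: 1},
		}
		blob, _ := json.Marshal(o)
		// Unset fields are omitted entirely:
		fmt.Println(string(blob)) // {"collection":"pics","replication":{"rack":1}}
	}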
