
go fmt

pull/1778/head
Chris Lu committed 4 years ago
commit d475c89fcc

  1. weed/command/filer_sync.go (30 lines changed)
  2. weed/command/mount.go (10 lines changed)
  3. weed/command/mount_std.go (20 lines changed)
  4. weed/filesys/dir.go (2 lines changed)
  5. weed/filesys/dirty_page.go (18 lines changed)
  6. weed/server/filer_server_handlers.go (2 lines changed)

weed/command/filer_sync.go (30 lines changed)

@@ -20,21 +20,21 @@ import (
 )
 type SyncOptions struct {
-    isActivePassive *bool
-    filerA *string
-    filerB *string
-    aPath *string
-    bPath *string
-    aReplication *string
-    bReplication *string
-    aCollection *string
-    bCollection *string
-    aTtlSec *int
-    bTtlSec *int
-    aDebug *bool
-    bDebug *bool
-    aProxyByFiler *bool
-    bProxyByFiler *bool
+    isActivePassive *bool
+    filerA          *string
+    filerB          *string
+    aPath           *string
+    bPath           *string
+    aReplication    *string
+    bReplication    *string
+    aCollection     *string
+    bCollection     *string
+    aTtlSec         *int
+    bTtlSec         *int
+    aDebug          *bool
+    bDebug          *bool
+    aProxyByFiler   *bool
+    bProxyByFiler   *bool
 }
 var (

weed/command/mount.go (10 lines changed)

@@ -6,11 +6,11 @@ import (
 )
 type MountOptions struct {
-    filer *string
-    filerMountRootPath *string
-    dir *string
-    dirAutoCreate *bool
-    collection *string
+    filer              *string
+    filerMountRootPath *string
+    dir                *string
+    dirAutoCreate      *bool
+    collection         *string
     replication        *string
     ttlSec             *int
     chunkSizeLimitMB   *int

weed/command/mount_std.go (20 lines changed)

@@ -169,16 +169,16 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
     }
     seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{
-        MountDirectory: dir,
-        FilerAddress: filer,
-        FilerGrpcAddress: filerGrpcAddress,
-        GrpcDialOption: grpcDialOption,
-        FilerMountRootPath: mountRoot,
-        Collection: *option.collection,
-        Replication: *option.replication,
-        TtlSec: int32(*option.ttlSec),
-        ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
-        ConcurrentWriters: *option.concurrentWriters,
+        MountDirectory:     dir,
+        FilerAddress:       filer,
+        FilerGrpcAddress:   filerGrpcAddress,
+        GrpcDialOption:     grpcDialOption,
+        FilerMountRootPath: mountRoot,
+        Collection:         *option.collection,
+        Replication:        *option.replication,
+        TtlSec:             int32(*option.ttlSec),
+        ChunkSizeLimit:     int64(chunkSizeLimitMB) * 1024 * 1024,
+        ConcurrentWriters:  *option.concurrentWriters,
         CacheDir:           *option.cacheDir,
         CacheSizeMB:        *option.cacheSizeMB,
         DataCenter:         *option.dataCenter,

weed/filesys/dir.go (2 lines changed)

@@ -175,7 +175,7 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
                 TtlSec:      dir.wfs.option.TtlSec,
             },
         },
-        OExcl: exlusive,
+        OExcl:      exlusive,
         Signatures: []int32{dir.wfs.signature},
     }
     glog.V(1).Infof("create %s/%s", dir.FullPath(), name)

weed/filesys/dirty_page.go (18 lines changed)

@@ -11,19 +11,19 @@ import (
 )
 type ContinuousDirtyPages struct {
-    intervals *ContinuousIntervals
-    f *File
-    writeWaitGroup sync.WaitGroup
-    chunkAddLock sync.Mutex
-    lastErr error
-    collection string
-    replication string
+    intervals      *ContinuousIntervals
+    f              *File
+    writeWaitGroup sync.WaitGroup
+    chunkAddLock   sync.Mutex
+    lastErr        error
+    collection     string
+    replication    string
 }
 func newDirtyPages(file *File) *ContinuousDirtyPages {
     dirtyPages := &ContinuousDirtyPages{
-        intervals: &ContinuousIntervals{},
-        f: file,
+        intervals: &ContinuousIntervals{},
+        f:         file,
     }
     return dirtyPages
 }

weed/server/filer_server_handlers.go (2 lines changed)

@@ -20,7 +20,7 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
     }
     if fileId != "" {
         stats.FilerRequestCounter.WithLabelValues("proxy").Inc()
-        fs.proxyToVolumeServer(w,r,fileId)
+        fs.proxyToVolumeServer(w, r, fileId)
         stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds())
         return
     }
