
add start metrics server

pull/1489/head
Konstantin Lebedev, 4 years ago
commit 324e44d4b3
1. weed/command/filer.go (4 changed lines)
2. weed/command/s3.go (4 changed lines)
3. weed/command/server.go (3 changed lines)
4. weed/command/volume.go (3 changed lines)
5. weed/server/filer_server.go (2 changed lines)
6. weed/server/volume_server.go (2 changed lines)
7. weed/stats/metrics.go (11 changed lines)
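
Taken together, the change adds an optional metricsPort flag to the filer, s3, volume, and combined server commands, threads it through FilerOption.MetricsHttpPort and NewVolumeServer, and starts a standalone Prometheus HTTP endpoint via the new stats.StartMetricsServer, which is a no-op when the port is 0 (the flag default). As a hypothetical usage example (the port numbers are illustrative, not part of this commit), each component can be given its own listener:

    weed filer -metricsPort=9326
    weed volume -metricsPort=9327
    weed server -filer.metricsPort=9326 -volume.metricsPort=9327 -s3.metricsPort=9328

Each process would then serve its own registry on :<port>/metrics, in addition to the existing LoopPushingMetric push loop.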

weed/command/filer.go (4 changed lines)

@@ -36,7 +36,7 @@ type FilerOptions struct {
disableHttp *bool
cipher *bool
peers *string
metricsHttpPort *int
// default leveldb directory, used in "weed server" mode
defaultLevelDbDirectory *string
}
@@ -57,6 +57,7 @@ func init() {
f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}
var cmdFiler = &Command{
@@ -122,6 +123,7 @@ func (fo *FilerOptions) startFiler() {
Port: uint32(*fo.port),
Cipher: *fo.cipher,
Filers: peers,
MetricsHttpPort: *fo.metricsHttpPort,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)

weed/command/s3.go (4 changed lines)

@@ -29,6 +29,7 @@ type S3Options struct {
domainName *string
tlsPrivateKey *string
tlsCertificate *string
metricsHttpPort *int
}
func init() {
@@ -39,6 +40,7 @@ func init() {
s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}
var cmdS3 = &Command{
@@ -153,6 +155,7 @@ func (s3opt *S3Options) startS3Server() bool {
}
}
go stats_collect.StartMetricsServer(stats_collect.S3Gather, *s3opt.metricsHttpPort)
go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), stats_collect.S3Gather, metricsAddress, metricsIntervalSec)
router := mux.NewRouter().SkipClean(true)
@@ -169,7 +172,6 @@
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
}
httpS := &http.Server{Handler: router}
listenAddress := fmt.Sprintf(":%d", *s3opt.port)

weed/command/server.go (3 changed lines)

@@ -89,6 +89,7 @@ func init() {
filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
filerOptions.metricsHttpPort = cmdServer.Flag.Int("filer.metricsPort", 0, "Prometheus metrics listen port")
serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -100,12 +101,14 @@ func init() {
serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
serverOptions.v.metricsHttpPort = cmdServer.Flag.Int("volume.metricsPort", 0, "Prometheus metrics listen port")
s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
s3Options.metricsHttpPort = cmdServer.Flag.Int("s3.metricsPort", 0, "Prometheus metrics listen port")
msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")

weed/command/volume.go (3 changed lines)

@@ -56,6 +56,7 @@ type VolumeServerOptions struct {
minFreeSpacePercents []float32
pprof *bool
preStopSeconds *int
metricsHttpPort *int
// pulseSeconds *int
}
@@ -80,6 +81,7 @@ func init() {
v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 1024, "limit file size to avoid out of memory")
v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}
var cmdVolume = &Command{
@@ -207,6 +209,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
*v.fixJpgOrientation, *v.readRedirect,
*v.compactionMBPerSecond,
*v.fileSizeLimitMB,
*v.metricsHttpPort,
)
// starting grpc server
grpcS := v.startGrpcService(volumeServer)

weed/server/filer_server.go (2 changed lines)

@@ -54,6 +54,7 @@ type FilerOption struct {
recursiveDelete bool
Cipher bool
Filers []string
MetricsHttpPort int
}
type FilerServer struct {
@@ -157,6 +158,7 @@ func (fs *FilerServer) maybeStartMetrics() {
}
}
go stats.StartMetricsServer(stats.FilerGather, fs.option.MetricsHttpPort)
go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), stats.FilerGather, fs.metricsAddress, fs.metricsIntervalSec)
}

weed/server/volume_server.go (2 changed lines)

@@ -46,6 +46,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
readRedirect bool,
compactionMBPerSecond int,
fileSizeLimitMB int,
metricsHttpPort int,
) *VolumeServer {
v := util.GetViper()
@@ -97,6 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
}
go vs.heartbeat()
go stats.StartMetricsServer(stats.VolumeServerGather, metricsHttpPort)
go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), stats.VolumeServerGather, vs.metricsAddress, vs.metricsIntervalSec)
return vs

weed/stats/metrics.go (11 changed lines)

@@ -2,11 +2,14 @@ package stats
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/client_golang/prometheus/push"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -150,6 +153,14 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, add
}
}
func StartMetricsServer(gatherer *prometheus.Registry, port int) {
if port == 0 {
return
}
http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
}
func SourceName(port uint32) string {
hostname, err := os.Hostname()
if err != nil {

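For reference, here is a minimal, self-contained sketch of the pattern that StartMetricsServer implements, using only the prometheus and promhttp packages already imported in this diff; the registry, counter name, and port below are illustrative placeholders, not code from the commit:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // startMetrics mirrors the shape of stats.StartMetricsServer: do nothing
    // when the port is 0, otherwise expose the given registry on /metrics.
    func startMetrics(gatherer *prometheus.Registry, port int) {
        if port == 0 {
            return
        }
        http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
    }

    func main() {
        // A dedicated registry, analogous to stats.FilerGather or stats.VolumeServerGather.
        gatherer := prometheus.NewRegistry()
        requests := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "example_requests_total",
            Help: "Illustrative counter registered on the standalone registry.",
        })
        gatherer.MustRegister(requests)
        requests.Inc()

        startMetrics(gatherer, 9326) // 9326 is an arbitrary example port
    }

Scraping :9326/metrics on such a process would return example_requests_total in the Prometheus text format; leaving the port at 0 returns immediately, matching the commit's default of a disabled listener.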