
Merge pull request #1489 from kmlebedev/promhttp

Promhttp
Chris Lu, 4 years ago, committed by GitHub
commit 2e9099369e
7 changed files:

  1. weed/command/filer.go (3 additions)
  2. weed/command/s3.go (3 additions)
  3. weed/command/server.go (3 additions)
  4. weed/command/volume.go (3 additions)
  5. weed/server/filer_server.go (2 additions)
  6. weed/server/volume_server.go (2 additions)
  7. weed/stats/metrics.go (11 additions)

weed/command/filer.go (3 additions)

@@ -36,6 +36,7 @@ type FilerOptions struct {
    disableHttp *bool
    cipher *bool
    peers *string
+   metricsHttpPort *int

    // default leveldb directory, used in "weed server" mode
    defaultLevelDbDirectory *string
@@ -57,6 +58,7 @@ func init() {
    f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed")
    f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers")
    f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+   f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}

var cmdFiler = &Command{
@@ -122,6 +124,7 @@ func (fo *FilerOptions) startFiler() {
        Port:   uint32(*fo.port),
        Cipher: *fo.cipher,
        Filers: peers,
+       MetricsHttpPort: *fo.metricsHttpPort,
    })
    if nfs_err != nil {
        glog.Fatalf("Filer startup error: %v", nfs_err)

weed/command/s3.go (3 additions)

@@ -29,6 +29,7 @@ type S3Options struct {
    domainName *string
    tlsPrivateKey *string
    tlsCertificate *string
+   metricsHttpPort *int
}

func init() {
@@ -39,6 +40,7 @@ func init() {
    s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file")
    s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file")
    s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file")
+   s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}

var cmdS3 = &Command{
@@ -153,6 +155,7 @@ func (s3opt *S3Options) startS3Server() bool {
        }
    }

+   go stats_collect.StartMetricsServer(stats_collect.S3Gather, *s3opt.metricsHttpPort)
    go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), stats_collect.S3Gather, metricsAddress, metricsIntervalSec)

    router := mux.NewRouter().SkipClean(true)

weed/command/server.go (3 additions)

@@ -89,6 +89,7 @@ func init() {
    filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size")
    filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers")
    filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list")
+   filerOptions.metricsHttpPort = cmdServer.Flag.Int("filer.metricsPort", 0, "Prometheus metrics listen port")

    serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port")
    serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port")
@@ -100,12 +101,14 @@ func init() {
    serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
    serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server")
    serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
+   serverOptions.v.metricsHttpPort = cmdServer.Flag.Int("volume.metricsPort", 0, "Prometheus metrics listen port")

    s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port")
    s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}")
    s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file")
    s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file")
    s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file")
+   s3Options.metricsHttpPort = cmdServer.Flag.Int("s3.metricsPort", 0, "Prometheus metrics listen port")

    msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port")

weed/command/volume.go (3 additions)

@@ -56,6 +56,7 @@ type VolumeServerOptions struct {
    minFreeSpacePercents []float32
    pprof *bool
    preStopSeconds *int
+   metricsHttpPort *int
    // pulseSeconds *int
}
@@ -80,6 +81,7 @@ func init() {
    v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second")
    v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 1024, "limit file size to avoid out of memory")
    v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile")
+   v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
}

var cmdVolume = &Command{
@@ -207,6 +209,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
        *v.fixJpgOrientation, *v.readRedirect,
        *v.compactionMBPerSecond,
        *v.fileSizeLimitMB,
+       *v.metricsHttpPort,
    )

    // starting grpc server
    grpcS := v.startGrpcService(volumeServer)

weed/server/filer_server.go (2 additions)

@@ -54,6 +54,7 @@ type FilerOption struct {
    recursiveDelete bool
    Cipher bool
    Filers []string
+   MetricsHttpPort int
}

type FilerServer struct {
@@ -157,6 +158,7 @@ func (fs *FilerServer) maybeStartMetrics() {
        }
    }

+   go stats.StartMetricsServer(stats.FilerGather, fs.option.MetricsHttpPort)
    go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), stats.FilerGather, fs.metricsAddress, fs.metricsIntervalSec)
}

weed/server/volume_server.go (2 additions)

@@ -46,6 +46,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
    readRedirect bool,
    compactionMBPerSecond int,
    fileSizeLimitMB int,
+   metricsHttpPort int,
) *VolumeServer {
    v := util.GetViper()
@@ -97,6 +98,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
    }

    go vs.heartbeat()
+   go stats.StartMetricsServer(stats.VolumeServerGather, metricsHttpPort)
    go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), stats.VolumeServerGather, vs.metricsAddress, vs.metricsIntervalSec)

    return vs

weed/stats/metrics.go (11 additions)

@@ -2,11 +2,14 @@ package stats

import (
    "fmt"
+   "log"
+   "net/http"
    "os"
    "strings"
    "time"

    "github.com/prometheus/client_golang/prometheus"
+   "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/prometheus/client_golang/prometheus/push"

    "github.com/chrislusf/seaweedfs/weed/glog"
@@ -150,6 +153,14 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, add
    }
}

+func StartMetricsServer(gatherer *prometheus.Registry, port int) {
+   if port == 0 {
+       return
+   }
+   http.Handle("/metrics", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
+   log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil))
+}
+
func SourceName(port uint32) string {
    hostname, err := os.Hostname()
    if err != nil {
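
For context, here is a minimal sketch of how the new StartMetricsServer is meant to be called from another component, mirroring the filer, volume, and s3 call sites above. Only the function signature and the stats.FilerGather registry come from this change; the package main wrapper and the example port 9324 are illustrative assumptions, and the import path is inferred from the weed/glog path shown in the diff.

package main

import (
    "time"

    "github.com/chrislusf/seaweedfs/weed/stats"
)

func main() {
    // Start the Prometheus scrape endpoint in the background, as the servers
    // do in this change. A port of 0 is a no-op; otherwise /metrics is served
    // by promhttp on the given port. 9324 is an arbitrary example port.
    go stats.StartMetricsServer(stats.FilerGather, 9324)

    // Keep the process alive so http://localhost:9324/metrics can be scraped.
    time.Sleep(time.Hour)
}

With a server started via the new flags (for example -metricsPort on the standalone commands, or the filer./volume./s3. prefixed variants on weed server), the registry can be scraped directly from /metrics on that port, in addition to the existing push-gateway loop driven by LoopPushingMetric.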
