mirror of https://github.com/matrix-org/go-neb.git
200 changed files with 41634 additions and 0 deletions
-
61 vendor/manifest
-
63 vendor/src/github.com/beorn7/perks/quantile/bench_test.go
-
121vendor/src/github.com/beorn7/perks/quantile/example_test.go
-
2388vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
-
292vendor/src/github.com/beorn7/perks/quantile/stream.go
-
215vendor/src/github.com/beorn7/perks/quantile/stream_test.go
-
43vendor/src/github.com/golang/protobuf/proto/Makefile
-
2278vendor/src/github.com/golang/protobuf/proto/all_test.go
-
300vendor/src/github.com/golang/protobuf/proto/any_test.go
-
229vendor/src/github.com/golang/protobuf/proto/clone.go
-
300vendor/src/github.com/golang/protobuf/proto/clone_test.go
-
970vendor/src/github.com/golang/protobuf/proto/decode.go
-
256vendor/src/github.com/golang/protobuf/proto/decode_test.go
-
1355vendor/src/github.com/golang/protobuf/proto/encode.go
-
83vendor/src/github.com/golang/protobuf/proto/encode_test.go
-
300vendor/src/github.com/golang/protobuf/proto/equal.go
-
224vendor/src/github.com/golang/protobuf/proto/equal_test.go
-
586vendor/src/github.com/golang/protobuf/proto/extensions.go
-
508vendor/src/github.com/golang/protobuf/proto/extensions_test.go
-
898vendor/src/github.com/golang/protobuf/proto/lib.go
-
311vendor/src/github.com/golang/protobuf/proto/message_set.go
-
66vendor/src/github.com/golang/protobuf/proto/message_set_test.go
-
484vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
-
270vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
-
872vendor/src/github.com/golang/protobuf/proto/properties.go
-
219vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
-
78vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
-
125vendor/src/github.com/golang/protobuf/proto/proto3_test.go
-
63vendor/src/github.com/golang/protobuf/proto/size2_test.go
-
164vendor/src/github.com/golang/protobuf/proto/size_test.go
-
50vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
-
86vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
-
4061vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
-
548vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
-
854vendor/src/github.com/golang/protobuf/proto/text.go
-
891vendor/src/github.com/golang/protobuf/proto/text_parser.go
-
662vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
-
474vendor/src/github.com/golang/protobuf/proto/text_test.go
-
7vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
-
178vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
-
75vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
-
99vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go
-
16vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
-
46vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
-
67vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go
-
18vendor/src/github.com/prometheus/client_golang/AUTHORS.md
-
109vendor/src/github.com/prometheus/client_golang/CHANGELOG.md
-
18vendor/src/github.com/prometheus/client_golang/CONTRIBUTING.md
-
201vendor/src/github.com/prometheus/client_golang/LICENSE
-
23vendor/src/github.com/prometheus/client_golang/NOTICE
-
46vendor/src/github.com/prometheus/client_golang/README.md
-
1vendor/src/github.com/prometheus/client_golang/VERSION
-
348vendor/src/github.com/prometheus/client_golang/api/prometheus/api.go
-
453vendor/src/github.com/prometheus/client_golang/api/prometheus/api_test.go
-
103vendor/src/github.com/prometheus/client_golang/examples/random/main.go
-
30vendor/src/github.com/prometheus/client_golang/examples/simple/main.go
-
1vendor/src/github.com/prometheus/client_golang/prometheus/README.md
-
183vendor/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go
-
75vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
-
172vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
-
58vendor/src/github.com/prometheus/client_golang/prometheus/counter_test.go
-
205vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
-
181vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
-
118vendor/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
-
752vendor/src/github.com/prometheus/client_golang/prometheus/examples_test.go
-
119vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector.go
-
97vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
-
29vendor/src/github.com/prometheus/client_golang/prometheus/fnv.go
-
140vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
-
182vendor/src/github.com/prometheus/client_golang/prometheus/gauge_test.go
-
263vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
-
123vendor/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go
-
444vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
-
326vendor/src/github.com/prometheus/client_golang/prometheus/histogram_test.go
-
499vendor/src/github.com/prometheus/client_golang/prometheus/http.go
-
121vendor/src/github.com/prometheus/client_golang/prometheus/http_test.go
-
166vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
-
35vendor/src/github.com/prometheus/client_golang/prometheus/metric_test.go
-
142vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
-
58vendor/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go
-
201vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http.go
-
137vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
-
56vendor/src/github.com/prometheus/client_golang/prometheus/push/examples_test.go
-
172vendor/src/github.com/prometheus/client_golang/prometheus/push/push.go
-
176vendor/src/github.com/prometheus/client_golang/prometheus/push/push_test.go
-
806vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
-
545vendor/src/github.com/prometheus/client_golang/prometheus/registry_test.go
-
534vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
-
347vendor/src/github.com/prometheus/client_golang/prometheus/summary_test.go
-
138vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
-
234vendor/src/github.com/prometheus/client_golang/prometheus/value.go
-
404vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
-
312vendor/src/github.com/prometheus/client_golang/prometheus/vec_test.go
-
364vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
-
167vendor/src/github.com/prometheus/common/expfmt/bench_test.go
-
412vendor/src/github.com/prometheus/common/expfmt/decode.go
-
367vendor/src/github.com/prometheus/common/expfmt/decode_test.go
-
88vendor/src/github.com/prometheus/common/expfmt/encode.go
-
37vendor/src/github.com/prometheus/common/expfmt/expfmt.go
-
36vendor/src/github.com/prometheus/common/expfmt/fuzz.go
@ -0,0 +1,63 @@ |
|||
package quantile |
|||
|
|||
import ( |
|||
"testing" |
|||
) |
|||
|
|||
func BenchmarkInsertTargeted(b *testing.B) { |
|||
b.ReportAllocs() |
|||
|
|||
s := NewTargeted(Targets) |
|||
b.ResetTimer() |
|||
for i := float64(0); i < float64(b.N); i++ { |
|||
s.Insert(i) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { |
|||
s := NewTargeted(TargetsSmallEpsilon) |
|||
b.ResetTimer() |
|||
for i := float64(0); i < float64(b.N); i++ { |
|||
s.Insert(i) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkInsertBiased(b *testing.B) { |
|||
s := NewLowBiased(0.01) |
|||
b.ResetTimer() |
|||
for i := float64(0); i < float64(b.N); i++ { |
|||
s.Insert(i) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { |
|||
s := NewLowBiased(0.0001) |
|||
b.ResetTimer() |
|||
for i := float64(0); i < float64(b.N); i++ { |
|||
s.Insert(i) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkQuery(b *testing.B) { |
|||
s := NewTargeted(Targets) |
|||
for i := float64(0); i < 1e6; i++ { |
|||
s.Insert(i) |
|||
} |
|||
b.ResetTimer() |
|||
n := float64(b.N) |
|||
for i := float64(0); i < n; i++ { |
|||
s.Query(i / n) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkQuerySmallEpsilon(b *testing.B) { |
|||
s := NewTargeted(TargetsSmallEpsilon) |
|||
for i := float64(0); i < 1e6; i++ { |
|||
s.Insert(i) |
|||
} |
|||
b.ResetTimer() |
|||
n := float64(b.N) |
|||
for i := float64(0); i < n; i++ { |
|||
s.Query(i / n) |
|||
} |
|||
} |
|||
@ -0,0 +1,121 @@ |
|||
// +build go1.1
|
|||
|
|||
package quantile_test |
|||
|
|||
import ( |
|||
"bufio" |
|||
"fmt" |
|||
"log" |
|||
"os" |
|||
"strconv" |
|||
"time" |
|||
|
|||
"github.com/beorn7/perks/quantile" |
|||
) |
|||
|
|||
// Example_simple demonstrates computing targeted quantiles over a single
// stream of float64 values read from a channel.
func Example_simple() {
	ch := make(chan float64)
	go sendFloats(ch)

	// Compute the 50th, 90th, and 99th percentile.
	// Each target quantile maps to its allowed absolute error.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	// Drain the channel; sendFloats closes it once the data is exhausted.
	for v := range ch {
		q.Insert(v)
	}

	fmt.Println("perc50:", q.Query(0.50))
	fmt.Println("perc90:", q.Query(0.90))
	fmt.Println("perc99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
	// Output:
	// perc50: 5
	// perc90: 16
	// perc99: 223
	// count: 2388
}
|||
|
|||
// Example_mergeMultipleStreams demonstrates combining per-shard quantile
// summaries into a single queryable stream via Merge.
func Example_mergeMultipleStreams() {
	// Scenario:
	// We have multiple database shards. On each shard, there is a process
	// collecting query response times from the database logs and inserting
	// them into a Stream (created via NewTargeted(0.90)), much like the
	// Simple example. These processes expose a network interface for us to
	// ask them to serialize and send us the results of their
	// Stream.Samples so we may Merge and Query them.
	//
	// NOTES:
	// * These sample sets are small, allowing us to get them
	// across the network much faster than sending the entire list of data
	// points.
	//
	// * For this to work correctly, we must supply the same quantiles
	// a priori the process collecting the samples supplied to NewTargeted,
	// even if we do not plan to query them all here.
	ch := make(chan quantile.Samples)
	getDBQuerySamples(ch)
	q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	// Fold each shard's serialized summary into the local stream.
	for samples := range ch {
		q.Merge(samples)
	}
	fmt.Println("perc90:", q.Query(0.90))
}
|||
|
|||
// Example_window demonstrates resetting a stream on a timer to compute
// quantiles over fixed time windows.
func Example_window() {
	// Scenario: We want the 90th, 95th, and 99th percentiles for each
	// minute.

	ch := make(chan float64)
	go sendStreamValues(ch)

	// NOTE(review): the ticker is never stopped; acceptable here only
	// because the loop below never returns.
	tick := time.NewTicker(1 * time.Minute)
	q := quantile.NewTargeted(map[float64]float64{
		0.90: 0.001,
		0.95: 0.0005,
		0.99: 0.0001,
	})
	for {
		select {
		case t := <-tick.C:
			// Persist this window's summary, then start a fresh one.
			flushToDB(t, q.Samples())
			q.Reset()
		case v := <-ch:
			q.Insert(v)
		}
	}
}
|||
|
|||
// sendStreamValues would feed observed values into ch; left empty as a
// placeholder for the window example.
func sendStreamValues(ch chan float64) {
	// Use your imagination
}

// flushToDB would persist a window's samples taken at time t; placeholder
// only.
func flushToDB(t time.Time, samples quantile.Samples) {
	// Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}
|||
|
|||
func sendFloats(ch chan<- float64) { |
|||
f, err := os.Open("exampledata.txt") |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
sc := bufio.NewScanner(f) |
|||
for sc.Scan() { |
|||
b := sc.Bytes() |
|||
v, err := strconv.ParseFloat(string(b), 64) |
|||
if err != nil { |
|||
log.Fatal(err) |
|||
} |
|||
ch <- v |
|||
} |
|||
if sc.Err() != nil { |
|||
log.Fatal(sc.Err()) |
|||
} |
|||
close(ch) |
|||
} |
|||
2388
vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,292 @@ |
|||
// Package quantile computes approximate quantiles over an unbounded data
|
|||
// stream within low memory and CPU bounds.
|
|||
//
|
|||
// A small amount of accuracy is traded to achieve the above properties.
|
|||
//
|
|||
// Multiple streams can be merged before calling Query to generate a single set
|
|||
// of results. This is meaningful when the streams represent the same type of
|
|||
// data. See Merge and Samples.
|
|||
//
|
|||
// For more detailed information about the algorithm used, see:
|
|||
//
|
|||
// Effective Computation of Biased Quantiles over Data Streams
|
|||
//
|
|||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|||
package quantile |
|||
|
|||
import ( |
|||
"math" |
|||
"sort" |
|||
) |
|||
|
|||
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"` // the observed value
	Width float64 `json:",string"` // weight: how many observations this sample stands for after compression
	Delta float64 `json:",string"` // rank-uncertainty term used by query and compress (see the paper)
}
|||
|
|||
// Samples represents a slice of samples. It implements sort.Interface,
// ordering samples by Value.
type Samples []Sample

func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// invariant computes the maximum allowed error width for a sample at rank r
// within stream s; each Stream flavor supplies its own invariant.
type invariant func(s *stream, r float64) float64
|||
|
|||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
|||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|||
// error guarantees can still be given even for the lower ranks of the data
|
|||
// distribution.
|
|||
//
|
|||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
|||
//
|
|||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|||
// properties.
|
|||
func NewLowBiased(epsilon float64) *Stream { |
|||
ƒ := func(s *stream, r float64) float64 { |
|||
return 2 * epsilon * r |
|||
} |
|||
return newStream(ƒ) |
|||
} |
|||
|
|||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
|||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|||
// error guarantees can still be given even for the higher ranks of the data
|
|||
// distribution.
|
|||
//
|
|||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
|||
//
|
|||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|||
// properties.
|
|||
func NewHighBiased(epsilon float64) *Stream { |
|||
ƒ := func(s *stream, r float64) float64 { |
|||
return 2 * epsilon * (s.n - r) |
|||
} |
|||
return newStream(ƒ) |
|||
} |
|||
|
|||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
|||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
|||
// space and computation time. The targets map maps the desired quantiles to
|
|||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
|||
// is guaranteed to be within (Quantile±Epsilon).
|
|||
//
|
|||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
|||
func NewTargeted(targets map[float64]float64) *Stream { |
|||
ƒ := func(s *stream, r float64) float64 { |
|||
var m = math.MaxFloat64 |
|||
var f float64 |
|||
for quantile, epsilon := range targets { |
|||
if quantile*s.n <= r { |
|||
f = (2 * epsilon * r) / quantile |
|||
} else { |
|||
f = (2 * epsilon * (s.n - r)) / (1 - quantile) |
|||
} |
|||
if f < m { |
|||
m = f |
|||
} |
|||
} |
|||
return m |
|||
} |
|||
return newStream(ƒ) |
|||
} |
|||
|
|||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream // embedded compressed summary; receives the buffer via flush
	b Samples // buffer of samples not yet merged into the summary
	sorted bool // whether b is currently sorted by Value
}

// newStream constructs a Stream around the given error invariant ƒ. The
// buffer starts empty with a fixed capacity of 500 and counts as sorted.
func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}
|||
|
|||
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	// Each raw observation enters with weight 1 and no rank uncertainty.
	s.insert(Sample{Value: v, Width: 1})
}

// insert buffers sample and merges the buffer into the compressed summary
// once it reaches capacity, so memory use stays bounded by cap(s.b).
func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
|||
|
|||
// Query returns the computed qth percentiles value. If s was created with
|
|||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
|||
// will return an unspecified result.
|
|||
func (s *Stream) Query(q float64) float64 { |
|||
if !s.flushed() { |
|||
// Fast path when there hasn't been enough data for a flush;
|
|||
// this also yields better accuracy for small sets of data.
|
|||
l := len(s.b) |
|||
if l == 0 { |
|||
return 0 |
|||
} |
|||
i := int(math.Ceil(float64(l) * q)) |
|||
if i > 0 { |
|||
i -= 1 |
|||
} |
|||
s.maybeSort() |
|||
return s.b[i].Value |
|||
} |
|||
s.flush() |
|||
return s.stream.query(q) |
|||
} |
|||
|
|||
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	// stream.merge walks its input in ascending Value order, so sort first.
	sort.Sort(samples)
	s.stream.merge(samples)
}
|||
|
|||
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
//
// NOTE(review): before the first flush this returns the internal buffer
// itself (possibly unsorted), not a copy — a caller mutating the result
// would corrupt the stream. Confirm whether callers rely on this aliasing.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	// Buffered (not yet merged) samples plus the compressed summary's count.
	return len(s.b) + s.stream.count()
}

// flush sorts the buffer if needed, merges it into the compressed summary,
// and empties it while keeping its backing storage.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the buffer by Value unless it is already sorted.
func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

// flushed reports whether any samples have reached the compressed summary.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
|||
|
|||
// stream is the compressed summary underlying a Stream: an ordered sample
// list plus the error invariant that governs compression.
type stream struct {
	n float64 // total weight (sum of Widths) merged so far
	l []Sample // summary samples, ordered by Value
	ƒ invariant // allowed-error function for this stream flavor
}

// reset empties the summary, reusing the backing array of l.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert merges a single observation of weight 1 into the summary.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
|||
|
|||
// merge folds the Value-sorted batch samples into the ordered summary list l,
// assigning each inserted sample a Delta derived from the invariant ƒ at its
// running rank, then compresses the result.
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64 // running rank: total weight of summary samples before index i
	i := 0
	for _, sample := range samples {
		// Advance through l until we pass sample's insertion point;
		// i is never rewound because samples is sorted.
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// sample is larger than everything currently in l: append.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	// Re-establish the space bound after the batch insert.
	s.compress()
}
|||
|
|||
// count returns the total weight merged into the summary, truncated to int.
func (s *stream) count() int {
	return int(s.n)
}

// query returns the summary value whose rank best matches quantile q.
// It assumes l is non-empty (Stream.Query guards this via flushed()).
func (s *stream) query(q float64) float64 {
	// Target rank, widened by half the allowed error at that rank.
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64 // cumulative weight preceding candidate p
	for _, c := range s.l[1:] {
		r += p.Width
		// Stop at the last sample whose maximal possible rank is still
		// within the widened target.
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
|||
|
|||
// compress walks the summary right-to-left, merging each sample into its
// right-hand neighbor whenever the combined width stays within the
// invariant's allowance at that rank, shrinking l while preserving the
// query error guarantees.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1] // current merge target (right neighbor)
	xi := len(s.l) - 1 // index of x within l
	r := s.n - 1 - x.Width // rank preceding the candidate under inspection

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merging c into x keeps the error bound: absorb it.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			// c survives and becomes the next merge target.
			x = c
			xi = i
		}
		r -= c.Width
	}
}
|||
|
|||
func (s *stream) samples() Samples { |
|||
samples := make(Samples, len(s.l)) |
|||
copy(samples, s.l) |
|||
return samples |
|||
} |
|||
@ -0,0 +1,215 @@ |
|||
package quantile |
|||
|
|||
import ( |
|||
"math" |
|||
"math/rand" |
|||
"sort" |
|||
"testing" |
|||
) |
|||
|
|||
var (
	// Targets maps each tested quantile to its allowed absolute error.
	Targets = map[float64]float64{
		0.01: 0.001,
		0.10: 0.01,
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	// TargetsSmallEpsilon is Targets with much tighter error allowances.
	TargetsSmallEpsilon = map[float64]float64{
		0.01: 0.0001,
		0.10: 0.001,
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	}
	// LowQuantiles and HighQuantiles are the probe points for the
	// biased-stream tests.
	LowQuantiles = []float64{0.01, 0.1, 0.5}
	HighQuantiles = []float64{0.99, 0.9, 0.5}
)

// RelativeEpsilon is the relative error used by the biased-quantile tests.
const RelativeEpsilon = 0.01
|||
|
|||
// verifyPercsWithAbsoluteEpsilon checks that each Targets quantile queried
// from s falls between the exact data values at ranks (q-ε)·n and (q+ε)·n
// of the sorted reference data a.
func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for quantile, epsilon := range Targets {
		n := float64(len(a))
		k := int(quantile * n)
		if k < 1 {
			k = 1
		}
		// Clamp the acceptable rank window to [1, len(a)].
		lower := int((quantile - epsilon) * n)
		if lower < 1 {
			lower = 1
		}
		upper := int(math.Ceil((quantile + epsilon) * n))
		if upper > len(a) {
			upper = len(a)
		}
		w, min, max := a[k-1], a[lower-1], a[upper-1]
		if g := s.Query(quantile); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
		}
	}
}

// verifyLowPercsWithRelativeEpsilon checks the low-biased guarantee:
// Query(q) must land within ranks (1±RelativeEpsilon)·q·n of sorted a.
func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range LowQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - RelativeEpsilon) * qu * n)
		upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

// verifyHighPercsWithRelativeEpsilon checks the high-biased guarantee:
// Query(q) must land within ranks (1-(1±ε)·(1-q))·n of sorted a.
func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range HighQuantiles {
		n := float64(len(a))
		k := int(qu * n)

		lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
		upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}
|||
|
|||
func populateStream(s *Stream) []float64 { |
|||
a := make([]float64, 0, 1e5+100) |
|||
for i := 0; i < cap(a); i++ { |
|||
v := rand.NormFloat64() |
|||
// Add 5% asymmetric outliers.
|
|||
if i%20 == 0 { |
|||
v = v*v + 1 |
|||
} |
|||
s.Insert(v) |
|||
a = append(a, v) |
|||
} |
|||
return a |
|||
} |
|||
|
|||
// TestTargetedQuery checks the absolute-error guarantee of a targeted
// stream over a large random data set (fixed seed for determinism).
func TestTargetedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(Targets)
	a := populateStream(s)
	verifyPercsWithAbsoluteEpsilon(t, a, s)
}

// TestTargetedQuerySmallSampleSize checks that below the flush threshold
// the stream answers from its raw buffer, which must be exact.
func TestTargetedQuerySmallSampleSize(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(TargetsSmallEpsilon)
	a := []float64{1, 2, 3, 4, 5}
	for _, v := range a {
		s.Insert(v)
	}
	verifyPercsWithAbsoluteEpsilon(t, a, s)
	// If not yet flushed, results should be precise:
	if !s.flushed() {
		for φ, want := range map[float64]float64{
			0.01: 1,
			0.10: 1,
			0.50: 3,
			0.90: 5,
			0.99: 5,
		} {
			if got := s.Query(φ); got != want {
				t.Errorf("want %f for φ=%f, got %f", want, φ, got)
			}
		}
	}
}

// TestLowBiasedQuery checks the relative-error guarantee of a low-biased
// stream at the low probe quantiles.
func TestLowBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewLowBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyLowPercsWithRelativeEpsilon(t, a, s)
}

// TestHighBiasedQuery checks the relative-error guarantee of a high-biased
// stream at the high probe quantiles.
func TestHighBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewHighBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyHighPercsWithRelativeEpsilon(t, a, s)
}
|||
|
|||
// BrokenTestTargetedMerge is broken, see Merge doc comment.
// (The "Broken" prefix keeps go test from picking it up.)
func BrokenTestTargetedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewTargeted(Targets)
	s2 := NewTargeted(Targets)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	// Fold s2's summary into s1 and check s1 against the combined data.
	s1.Merge(s2.Samples())
	verifyPercsWithAbsoluteEpsilon(t, a, s1)
}
|||
|
|||
// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
|
|||
func BrokenTestLowBiasedMerge(t *testing.T) { |
|||
rand.Seed(42) |
|||
s1 := NewLowBiased(RelativeEpsilon) |
|||
s2 := NewLowBiased(RelativeEpsilon) |
|||
a := populateStream(s1) |
|||
a = append(a, populateStream(s2)...) |
|||
s1.Merge(s2.Samples()) |
|||
verifyLowPercsWithRelativeEpsilon(t, a, s2) |
|||
} |
|||
|
|||
// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
|
|||
func BrokenTestHighBiasedMerge(t *testing.T) { |
|||
rand.Seed(42) |
|||
s1 := NewHighBiased(RelativeEpsilon) |
|||
s2 := NewHighBiased(RelativeEpsilon) |
|||
a := populateStream(s1) |
|||
a = append(a, populateStream(s2)...) |
|||
s1.Merge(s2.Samples()) |
|||
verifyHighPercsWithRelativeEpsilon(t, a, s2) |
|||
} |
|||
|
|||
// TestUncompressed checks that before any flush (100 < buffer capacity)
// Count and Query answer exactly from the raw buffer.
func TestUncompressed(t *testing.T) {
	q := NewTargeted(Targets)
	for i := 100; i > 0; i-- {
		q.Insert(float64(i))
	}
	if g := q.Count(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
	// Before compression, Query should have 100% accuracy.
	for quantile := range Targets {
		w := quantile * 100
		if g := q.Query(quantile); g != w {
			t.Errorf("want %f, got %f", w, g)
		}
	}
}

// TestUncompressedSamples checks that Samples exposes every buffered
// observation before any flush has occurred.
func TestUncompressedSamples(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.001})
	for i := 1; i <= 100; i++ {
		q.Insert(float64(i))
	}
	if g := q.Samples().Len(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
}

// TestUncompressedOne checks the single-observation case: any quantile
// query must return that one value.
func TestUncompressedOne(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.01})
	q.Insert(3.14)
	if g := q.Query(0.90); g != 3.14 {
		t.Error("want PI, got", g)
	}
}

// TestDefaults checks that querying an empty stream returns 0.
func TestDefaults(t *testing.T) {
	if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
		t.Errorf("want 0, got %f", g)
	}
}
|||
@ -0,0 +1,43 @@ |
|||
# Go support for Protocol Buffers - Google's data interchange format
|
|||
#
|
|||
# Copyright 2010 The Go Authors. All rights reserved.
|
|||
# https://github.com/golang/protobuf
|
|||
#
|
|||
# Redistribution and use in source and binary forms, with or without
|
|||
# modification, are permitted provided that the following conditions are
|
|||
# met:
|
|||
#
|
|||
# * Redistributions of source code must retain the above copyright
|
|||
# notice, this list of conditions and the following disclaimer.
|
|||
# * Redistributions in binary form must reproduce the above
|
|||
# copyright notice, this list of conditions and the following disclaimer
|
|||
# in the documentation and/or other materials provided with the
|
|||
# distribution.
|
|||
# * Neither the name of Google Inc. nor the names of its
|
|||
# contributors may be used to endorse or promote products derived from
|
|||
# this software without specific prior written permission.
|
|||
#
|
|||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
# Build and install the proto package into GOPATH.
install:
	go install

# Run the package tests; regenerates the test protobufs first.
test: install generate-test-pbs

	go test


# Regenerate the .pb.go files used by the tests (requires protoc and
# the protoc-gen-go plugin on PATH).
generate-test-pbs:
	make install
	make -C testdata
	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
	make
|||
2278
vendor/src/github.com/golang/protobuf/proto/all_test.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,300 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"strings" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
testpb "github.com/golang/protobuf/proto/testdata" |
|||
anypb "github.com/golang/protobuf/ptypes/any" |
|||
) |
|||
|
|||
var (
	// expandedMarshaler renders google.protobuf.Any fields in expanded form.
	expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
	// expandedCompactMarshaler does the same on a single compact line.
	expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
)
|||
|
|||
// anyEqual reports whether two messages which may be google.protobuf.Any or may
// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
// comparison, because semantically equivalent messages may be marshaled to
// binary in different tag order. Instead, trust that TextMarshaler with
// ExpandAny option works and compare the text marshaling results.
func anyEqual(got, want proto.Message) bool {
	// if messages are proto.Equal, no need to marshal.
	if proto.Equal(got, want) {
		return true
	}
	// Fall back to comparing the expanded text renderings.
	g := expandedMarshaler.Text(got)
	w := expandedMarshaler.Text(want)
	return g == w
}
|||
|
|||
// golden is a single text-marshaling test case: a message m together with
// its expected expanded text form t and expanded-compact text form c.
type golden struct {
	m    proto.Message
	t, c string
}
|||
|
|||
// goldenMessages is the shared table of (message, expanded text, compact text)
// triples consumed by TestMarshalGolden and TestUnmarshalGolden.
var goldenMessages = makeGolden()

// makeGolden builds the golden test table. It panics on any marshal or
// setup error, since a failure here means the fixtures themselves are broken.
func makeGolden() []golden {
	nested := &pb.Nested{Bunny: "Monty"}
	nb, err := proto.Marshal(nested)
	if err != nil {
		panic(err)
	}
	// m1-m4 exercise progressively trickier Any type URLs: plain,
	// host-qualified (requires quoting in text form), a URL containing a
	// quote character, and a URL with an embedded path.
	m1 := &pb.Message{
		Name:        "David",
		ResultCount: 47,
		Anything:    &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
	}
	m2 := &pb.Message{
		Name:        "David",
		ResultCount: 47,
		Anything:    &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
	}
	m3 := &pb.Message{
		Name:        "David",
		ResultCount: 47,
		Anything:    &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
	}
	m4 := &pb.Message{
		Name:        "David",
		ResultCount: 47,
		Anything:    &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
	}
	// m5 is a bare Any (not wrapped in a containing message).
	m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}

	// any1/any2 are proto2 messages with extensions, packed into Any values
	// so that m6 exercises extension rendering inside expanded Anys.
	any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
	proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
	proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
	any1b, err := proto.Marshal(any1)
	if err != nil {
		panic(err)
	}
	any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
	proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
	any2b, err := proto.Marshal(any2)
	if err != nil {
		panic(err)
	}
	m6 := &pb.Message{
		Name:        "David",
		ResultCount: 47,
		Anything:    &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
		ManyThings: []*anypb.Any{
			&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
			&anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
		},
	}

	// The golden constants are the expected expanded text output; the
	// compact expectations below are derived from them via compact().
	const (
		m1Golden = `
name: "David"
result_count: 47
anything: <
  [type.googleapis.com/proto3_proto.Nested]: <
    bunny: "Monty"
  >
>
`
		m2Golden = `
name: "David"
result_count: 47
anything: <
  ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
    bunny: "Monty"
  >
>
`
		m3Golden = `
name: "David"
result_count: 47
anything: <
  ["type.googleapis.com/\"/proto3_proto.Nested"]: <
    bunny: "Monty"
  >
>
`
		m4Golden = `
name: "David"
result_count: 47
anything: <
  [type.googleapis.com/a/path/proto3_proto.Nested]: <
    bunny: "Monty"
  >
>
`
		m5Golden = `
[type.googleapis.com/proto3_proto.Nested]: <
  bunny: "Monty"
>
`
		m6Golden = `
name: "David"
result_count: 47
anything: <
  [type.googleapis.com/testdata.MyMessage]: <
    count: 47
    name: "David"
    [testdata.Ext.more]: <
      data: "foo"
    >
    [testdata.Ext.text]: "bar"
  >
>
many_things: <
  [type.googleapis.com/testdata.MyMessage]: <
    count: 42
    bikeshed: GREEN
    rep_bytes: "roboto"
    [testdata.Ext.more]: <
      data: "baz"
    >
  >
>
many_things: <
  [type.googleapis.com/testdata.MyMessage]: <
    count: 47
    name: "David"
    [testdata.Ext.more]: <
      data: "foo"
    >
    [testdata.Ext.text]: "bar"
  >
>
`
	)
	return []golden{
		{m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
		{m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
		{m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
		{m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
		{m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
		{m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
	}
}
|||
|
|||
func TestMarshalGolden(t *testing.T) { |
|||
for _, tt := range goldenMessages { |
|||
if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { |
|||
t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) |
|||
} |
|||
if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { |
|||
t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestUnmarshalGolden(t *testing.T) { |
|||
for _, tt := range goldenMessages { |
|||
want := tt.m |
|||
got := proto.Clone(tt.m) |
|||
got.Reset() |
|||
if err := proto.UnmarshalText(tt.t, got); err != nil { |
|||
t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) |
|||
} |
|||
if !anyEqual(got, want) { |
|||
t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) |
|||
} |
|||
got.Reset() |
|||
if err := proto.UnmarshalText(tt.c, got); err != nil { |
|||
t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) |
|||
} |
|||
if !anyEqual(got, want) { |
|||
t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestMarshalUnknownAny(t *testing.T) { |
|||
m := &pb.Message{ |
|||
Anything: &anypb.Any{ |
|||
TypeUrl: "foo", |
|||
Value: []byte("bar"), |
|||
}, |
|||
} |
|||
want := `anything: < |
|||
type_url: "foo" |
|||
value: "bar" |
|||
> |
|||
` |
|||
got := expandedMarshaler.Text(m) |
|||
if got != want { |
|||
t.Errorf("got\n`%s`\nwant\n`%s`", got, want) |
|||
} |
|||
} |
|||
|
|||
// TestAmbiguousAny checks that an Any written with explicit type_url/value
// fields (rather than the expanded [url]: <...> form) still parses, even
// though the type URL is not resolvable.
func TestAmbiguousAny(t *testing.T) {
	pb := &anypb.Any{}
	err := proto.UnmarshalText(`
	type_url: "ttt/proto3_proto.Nested"
	value: "\n\x05Monty"
	`, pb)
	t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
	if err != nil {
		t.Errorf("failed to parse ambiguous Any message: %v", err)
	}
}
|||
|
|||
// TestUnmarshalOverwriteAny checks that unpacking two expanded Any bodies
// into the same Any is rejected. The expected error pins "line 7", which
// depends on the exact layout of the raw string below — do not reflow it.
func TestUnmarshalOverwriteAny(t *testing.T) {
	pb := &anypb.Any{}
	err := proto.UnmarshalText(`
	[type.googleapis.com/a/path/proto3_proto.Nested]: <
		bunny: "Monty"
	>
	[type.googleapis.com/a/path/proto3_proto.Nested]: <
		bunny: "Rabbit of Caerbannog"
	>
	`, pb)
	want := `line 7: Any message unpacked multiple times, or "type_url" already set`
	if err.Error() != want {
		t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
	}
}
|||
|
|||
// TestUnmarshalAnyMixAndMatch checks that setting "value" explicitly and
// then also unpacking an expanded Any body is rejected. The expected error
// pins "line 5", which depends on the raw-string layout — do not reflow it.
func TestUnmarshalAnyMixAndMatch(t *testing.T) {
	pb := &anypb.Any{}
	err := proto.UnmarshalText(`
	value: "\n\x05Monty"
	[type.googleapis.com/a/path/proto3_proto.Nested]: <
		bunny: "Rabbit of Caerbannog"
	>
	`, pb)
	want := `line 5: Any message unpacked multiple times, or "value" already set`
	if err.Error() != want {
		t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
	}
}
|||
@ -0,0 +1,229 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
// Protocol buffer deep copy and merge.
|
|||
// TODO: RawMessage.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"log" |
|||
"reflect" |
|||
"strings" |
|||
) |
|||
|
|||
// Clone returns a deep copy of a protocol buffer.
|
|||
func Clone(pb Message) Message { |
|||
in := reflect.ValueOf(pb) |
|||
if in.IsNil() { |
|||
return pb |
|||
} |
|||
|
|||
out := reflect.New(in.Type().Elem()) |
|||
// out is empty so a merge is a deep copy.
|
|||
mergeStruct(out.Elem(), in.Elem()) |
|||
return out.Interface().(Message) |
|||
} |
|||
|
|||
// Merge merges src into dst.
|
|||
// Required and optional fields that are set in src will be set to that value in dst.
|
|||
// Elements of repeated fields will be appended.
|
|||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
|||
func Merge(dst, src Message) { |
|||
in := reflect.ValueOf(src) |
|||
out := reflect.ValueOf(dst) |
|||
if out.IsNil() { |
|||
panic("proto: nil destination") |
|||
} |
|||
if in.Type() != out.Type() { |
|||
// Explicit test prior to mergeStruct so that mistyped nils will fail
|
|||
panic("proto: type mismatch") |
|||
} |
|||
if in.IsNil() { |
|||
// Merging nil into non-nil is a quiet no-op
|
|||
return |
|||
} |
|||
mergeStruct(out.Elem(), in.Elem()) |
|||
} |
|||
|
|||
// mergeStruct merges the fields of struct value in into out.
// Both values must be struct values of the same type.
func mergeStruct(out, in reflect.Value) {
	sprop := GetProperties(in.Type())
	for i := 0; i < in.NumField(); i++ {
		f := in.Type().Field(i)
		// XXX_ fields are internal bookkeeping (extensions, unrecognized
		// bytes); they are handled explicitly below, not field-by-field.
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
	}

	// Merge extensions, holding the source's extension lock while reading
	// so concurrent readers of in see a consistent map.
	if emIn, ok := extendable(in.Addr().Interface()); ok {
		emOut, _ := extendable(out.Addr().Interface())
		mIn, muIn := emIn.extensionsRead()
		if mIn != nil {
			mOut := emOut.extensionsWrite()
			muIn.Lock()
			mergeExtension(mOut, mIn)
			muIn.Unlock()
		}
	}

	// Deep-copy any unrecognized wire-format bytes so later appends to one
	// message cannot affect the other.
	uf := in.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return
	}
	uin := uf.Bytes()
	if len(uin) > 0 {
		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
	}
}
|||
|
|||
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
	// Values stored as the Message interface type are merged via the
	// public Clone/Merge entry points rather than by reflection.
	if in.Type() == protoMessageType {
		if !in.IsNil() {
			if out.IsNil() {
				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
			} else {
				Merge(out.Interface().(Message), in.Interface().(Message))
			}
		}
		return
	}
	switch in.Kind() {
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
		reflect.String, reflect.Uint32, reflect.Uint64:
		// A non-pointer scalar is a proto3 field; its zero value means
		// "unset" and must not overwrite the destination.
		if !viaPtr && isProto3Zero(in) {
			return
		}
		out.Set(in)
	case reflect.Interface:
		// Probably a oneof field; copy non-nil values.
		if in.IsNil() {
			return
		}
		// Allocate destination if it is not set, or set to a different type.
		// Otherwise we will merge as normal.
		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
		}
		mergeAny(out.Elem(), in.Elem(), false, nil)
	case reflect.Map:
		if in.Len() == 0 {
			return
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(in.Type()))
		}
		// For maps with value types of *T or []byte we need to deep copy each value.
		elemKind := in.Type().Elem().Kind()
		for _, key := range in.MapKeys() {
			var val reflect.Value
			switch elemKind {
			case reflect.Ptr:
				val = reflect.New(in.Type().Elem().Elem())
				mergeAny(val, in.MapIndex(key), false, nil)
			case reflect.Slice:
				val = in.MapIndex(key)
				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
			default:
				val = in.MapIndex(key)
			}
			out.SetMapIndex(key, val)
		}
	case reflect.Ptr:
		if in.IsNil() {
			return
		}
		if out.IsNil() {
			out.Set(reflect.New(in.Elem().Type()))
		}
		// viaPtr=true: the pointee is a proto2 value, so even zero values
		// are considered set and get copied.
		mergeAny(out.Elem(), in.Elem(), true, nil)
	case reflect.Slice:
		if in.IsNil() {
			return
		}
		if in.Type().Elem().Kind() == reflect.Uint8 {
			// []byte is a scalar bytes field, not a repeated field.

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value, and should not
			// be merged.
			if prop != nil && prop.proto3 && in.Len() == 0 {
				return
			}

			// Make a deep copy.
			// Append to []byte{} instead of []byte(nil) so that we never end up
			// with a nil result.
			out.SetBytes(append([]byte{}, in.Bytes()...))
			return
		}
		n := in.Len()
		if out.IsNil() {
			out.Set(reflect.MakeSlice(in.Type(), 0, n))
		}
		switch in.Type().Elem().Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
			reflect.String, reflect.Uint32, reflect.Uint64:
			// Scalar elements can be appended wholesale.
			out.Set(reflect.AppendSlice(out, in))
		default:
			// Pointer/struct elements need an element-wise deep copy.
			for i := 0; i < n; i++ {
				x := reflect.Indirect(reflect.New(in.Type().Elem()))
				mergeAny(x, in.Index(i), false, nil)
				out.Set(reflect.Append(out, x))
			}
		}
	case reflect.Struct:
		mergeStruct(out, in)
	default:
		// unknown type, so not a protocol buffer
		log.Printf("proto: don't know how to copy %v", in)
	}
}
|||
|
|||
func mergeExtension(out, in map[int32]Extension) { |
|||
for extNum, eIn := range in { |
|||
eOut := Extension{desc: eIn.desc} |
|||
if eIn.value != nil { |
|||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem() |
|||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil) |
|||
eOut.value = v.Interface() |
|||
} |
|||
if eIn.enc != nil { |
|||
eOut.enc = make([]byte, len(eIn.enc)) |
|||
copy(eOut.enc, eIn.enc) |
|||
} |
|||
|
|||
out[extNum] = eOut |
|||
} |
|||
} |
|||
@ -0,0 +1,300 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
proto3pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
pb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
// cloneTestMessage is a fixture populated with scalar, repeated, nested,
// group, and bytes fields so that TestClone exercises every copy path.
// The init function below additionally attaches an extension to it.
var cloneTestMessage = &pb.MyMessage{
	Count: proto.Int32(42),
	Name:  proto.String("Dave"),
	Pet:   []string{"bunny", "kitty", "horsey"},
	Inner: &pb.InnerMessage{
		Host:      proto.String("niles"),
		Port:      proto.Int32(9099),
		Connected: proto.Bool(true),
	},
	Others: []*pb.OtherMessage{
		{
			Value: []byte("some bytes"),
		},
	},
	Somegroup: &pb.MyMessage_SomeGroup{
		GroupField: proto.Int32(6),
	},
	RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
}
|||
|
|||
// init attaches an extension to cloneTestMessage so the clone/merge tests
// also cover the extension-copying path; a setup failure is fatal.
func init() {
	ext := &pb.Ext{
		Data: proto.String("extension"),
	}
	if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
		panic("SetExtension: " + err.Error())
	}
}
|||
|
|||
func TestClone(t *testing.T) { |
|||
m := proto.Clone(cloneTestMessage).(*pb.MyMessage) |
|||
if !proto.Equal(m, cloneTestMessage) { |
|||
t.Errorf("Clone(%v) = %v", cloneTestMessage, m) |
|||
} |
|||
|
|||
// Verify it was a deep copy.
|
|||
*m.Inner.Port++ |
|||
if proto.Equal(m, cloneTestMessage) { |
|||
t.Error("Mutating clone changed the original") |
|||
} |
|||
// Byte fields and repeated fields should be copied.
|
|||
if &m.Pet[0] == &cloneTestMessage.Pet[0] { |
|||
t.Error("Pet: repeated field not copied") |
|||
} |
|||
if &m.Others[0] == &cloneTestMessage.Others[0] { |
|||
t.Error("Others: repeated field not copied") |
|||
} |
|||
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { |
|||
t.Error("Others[0].Value: bytes field not copied") |
|||
} |
|||
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { |
|||
t.Error("RepBytes: repeated field not copied") |
|||
} |
|||
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { |
|||
t.Error("RepBytes[0]: bytes field not copied") |
|||
} |
|||
} |
|||
|
|||
func TestCloneNil(t *testing.T) { |
|||
var m *pb.MyMessage |
|||
if c := proto.Clone(m); !proto.Equal(m, c) { |
|||
t.Errorf("Clone(%v) = %v", m, c) |
|||
} |
|||
} |
|||
|
|||
// mergeTests enumerates Merge behaviors: scalar set/overwrite, nested-message
// field-wise merge, repeated-field append, scalar-bytes replacement, map
// merge with deep-copied values, proto3 zero-value handling, and oneof
// assignment semantics.
var mergeTests = []struct {
	src, dst, want proto.Message
}{
	{
		src: &pb.MyMessage{
			Count: proto.Int32(42),
		},
		dst: &pb.MyMessage{
			Name: proto.String("Dave"),
		},
		want: &pb.MyMessage{
			Count: proto.Int32(42),
			Name:  proto.String("Dave"),
		},
	},
	{
		src: &pb.MyMessage{
			Inner: &pb.InnerMessage{
				Host:      proto.String("hey"),
				Connected: proto.Bool(true),
			},
			Pet: []string{"horsey"},
			Others: []*pb.OtherMessage{
				{
					Value: []byte("some bytes"),
				},
			},
		},
		dst: &pb.MyMessage{
			Inner: &pb.InnerMessage{
				Host: proto.String("niles"),
				Port: proto.Int32(9099),
			},
			Pet: []string{"bunny", "kitty"},
			Others: []*pb.OtherMessage{
				{
					Key: proto.Int64(31415926535),
				},
				{
					// Explicitly test a src=nil field
					Inner: nil,
				},
			},
		},
		want: &pb.MyMessage{
			Inner: &pb.InnerMessage{
				Host:      proto.String("hey"),
				Connected: proto.Bool(true),
				Port:      proto.Int32(9099),
			},
			Pet: []string{"bunny", "kitty", "horsey"},
			Others: []*pb.OtherMessage{
				{
					Key: proto.Int64(31415926535),
				},
				{},
				{
					Value: []byte("some bytes"),
				},
			},
		},
	},
	{
		src: &pb.MyMessage{
			RepBytes: [][]byte{[]byte("wow")},
		},
		dst: &pb.MyMessage{
			Somegroup: &pb.MyMessage_SomeGroup{
				GroupField: proto.Int32(6),
			},
			RepBytes: [][]byte{[]byte("sham")},
		},
		want: &pb.MyMessage{
			Somegroup: &pb.MyMessage_SomeGroup{
				GroupField: proto.Int32(6),
			},
			RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
		},
	},
	// Check that a scalar bytes field replaces rather than appends.
	{
		src:  &pb.OtherMessage{Value: []byte("foo")},
		dst:  &pb.OtherMessage{Value: []byte("bar")},
		want: &pb.OtherMessage{Value: []byte("foo")},
	},
	{
		src: &pb.MessageWithMap{
			NameMapping: map[int32]string{6: "Nigel"},
			MsgMapping: map[int64]*pb.FloatingPoint{
				0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
				0x4002: &pb.FloatingPoint{
					F: proto.Float64(2.0),
				},
			},
			ByteMapping: map[bool][]byte{true: []byte("wowsa")},
		},
		dst: &pb.MessageWithMap{
			NameMapping: map[int32]string{
				6: "Bruce", // should be overwritten
				7: "Andrew",
			},
			MsgMapping: map[int64]*pb.FloatingPoint{
				0x4002: &pb.FloatingPoint{
					F:     proto.Float64(3.0),
					Exact: proto.Bool(true),
				}, // the entire message should be overwritten
			},
		},
		want: &pb.MessageWithMap{
			NameMapping: map[int32]string{
				6: "Nigel",
				7: "Andrew",
			},
			MsgMapping: map[int64]*pb.FloatingPoint{
				0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
				0x4002: &pb.FloatingPoint{
					F: proto.Float64(2.0),
				},
			},
			ByteMapping: map[bool][]byte{true: []byte("wowsa")},
		},
	},
	// proto3 shouldn't merge zero values,
	// in the same way that proto2 shouldn't merge nils.
	{
		src: &proto3pb.Message{
			Name: "Aaron",
			Data: []byte(""), // zero value, but not nil
		},
		dst: &proto3pb.Message{
			HeightInCm: 176,
			Data:       []byte("texas!"),
		},
		want: &proto3pb.Message{
			Name:       "Aaron",
			HeightInCm: 176,
			Data:       []byte("texas!"),
		},
	},
	// Oneof fields should merge by assignment.
	{
		src: &pb.Communique{
			Union: &pb.Communique_Number{41},
		},
		dst: &pb.Communique{
			Union: &pb.Communique_Name{"Bobby Tables"},
		},
		want: &pb.Communique{
			Union: &pb.Communique_Number{41},
		},
	},
	// Oneof nil is the same as not set.
	{
		src: &pb.Communique{},
		dst: &pb.Communique{
			Union: &pb.Communique_Name{"Bobby Tables"},
		},
		want: &pb.Communique{
			Union: &pb.Communique_Name{"Bobby Tables"},
		},
	},
	{
		src: &proto3pb.Message{
			Terrain: map[string]*proto3pb.Nested{
				"kay_a": &proto3pb.Nested{Cute: true},      // replace
				"kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
			},
		},
		dst: &proto3pb.Message{
			Terrain: map[string]*proto3pb.Nested{
				"kay_a": &proto3pb.Nested{Bunny: "lost"},  // replaced
				"kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
			},
		},
		want: &proto3pb.Message{
			Terrain: map[string]*proto3pb.Nested{
				"kay_a": &proto3pb.Nested{Cute: true},
				"kay_b": &proto3pb.Nested{Bunny: "rabbit"},
				"kay_c": &proto3pb.Nested{Bunny: "bunny"},
			},
		},
	},
}
|||
|
|||
func TestMerge(t *testing.T) { |
|||
for _, m := range mergeTests { |
|||
got := proto.Clone(m.dst) |
|||
proto.Merge(got, m.src) |
|||
if !proto.Equal(got, m.want) { |
|||
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,970 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
/* |
|||
* Routines for decoding protocol buffer data to construct in-memory representations. |
|||
*/ |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"os" |
|||
"reflect" |
|||
) |
|||
|
|||
// errOverflow is returned when an integer is too large to be represented.
|
|||
var errOverflow = errors.New("proto: integer overflow") |
|||
|
|||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
|||
// wire type is encountered. It does not get returned to user code.
|
|||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") |
|||
|
|||
// The fundamental decoders that interpret bytes on the wire.
|
|||
// Those that take integer types all return uint64 and are
|
|||
// therefore of type valueDecoder.
|
|||
|
|||
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
	var shift uint
	for n < len(buf) {
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			// Continuation bit clear: this was the final byte.
			return x, n
		}
		shift += 7
		if shift >= 64 {
			// More than ten continuation bytes cannot fit in 64 bits.
			break
		}
	}
	// Ran out of input, or the value overflows a 64-bit integer.
	return 0, 0
}
|||
|
|||
// decodeVarintSlow is the bounds-checked fallback used by DecodeVarint when
// fewer than ten bytes remain in the buffer. On success it advances p.index;
// on error (truncated input or 64-bit overflow) p.index is left unchanged,
// though the partially accumulated x is still returned alongside err.
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
	i := p.index
	l := len(p.buf)

	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		b := p.buf[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			// Continuation bit clear: this was the final byte.
			p.index = i
			return
		}
	}

	// The number is too large to represent in a 64-bit value.
	err = errOverflow
	return
}
|||
|
|||
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
//
// The body is a hand-unrolled fast path: once at least 10 bytes are known
// to be available, no per-byte bounds checks are needed. Each stanza adds
// the next 7-bit group with `x += b << shift`; when the continuation bit
// is set, the following `x -= 0x80 << shift` cancels that bit back out.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	i := p.index
	buf := p.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		// Common case: single-byte varint.
		p.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		// Too few bytes to skip bounds checks; take the slow path.
		return p.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	x = uint64(buf[i]) - 0x80
	i++

	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	// Ten continuation bytes: the value cannot fit in 64 bits.
	return 0, errOverflow

done:
	p.index = i
	return x, nil
}
|||
|
|||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
|||
// This is the format for the
|
|||
// fixed64, sfixed64, and double protocol buffer types.
|
|||
func (p *Buffer) DecodeFixed64() (x uint64, err error) { |
|||
// x, err already 0
|
|||
i := p.index + 8 |
|||
if i < 0 || i > len(p.buf) { |
|||
err = io.ErrUnexpectedEOF |
|||
return |
|||
} |
|||
p.index = i |
|||
|
|||
x = uint64(p.buf[i-8]) |
|||
x |= uint64(p.buf[i-7]) << 8 |
|||
x |= uint64(p.buf[i-6]) << 16 |
|||
x |= uint64(p.buf[i-5]) << 24 |
|||
x |= uint64(p.buf[i-4]) << 32 |
|||
x |= uint64(p.buf[i-3]) << 40 |
|||
x |= uint64(p.buf[i-2]) << 48 |
|||
x |= uint64(p.buf[i-1]) << 56 |
|||
return |
|||
} |
|||
|
|||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
|||
// This is the format for the
|
|||
// fixed32, sfixed32, and float protocol buffer types.
|
|||
func (p *Buffer) DecodeFixed32() (x uint64, err error) { |
|||
// x, err already 0
|
|||
i := p.index + 4 |
|||
if i < 0 || i > len(p.buf) { |
|||
err = io.ErrUnexpectedEOF |
|||
return |
|||
} |
|||
p.index = i |
|||
|
|||
x = uint64(p.buf[i-4]) |
|||
x |= uint64(p.buf[i-3]) << 8 |
|||
x |= uint64(p.buf[i-2]) << 16 |
|||
x |= uint64(p.buf[i-1]) << 24 |
|||
return |
|||
} |
|||
|
|||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
|||
// from the Buffer.
|
|||
// This is the format used for the sint64 protocol buffer type.
|
|||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) { |
|||
x, err = p.DecodeVarint() |
|||
if err != nil { |
|||
return |
|||
} |
|||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) |
|||
return |
|||
} |
|||
|
|||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
|||
// from the Buffer.
|
|||
// This is the format used for the sint32 protocol buffer type.
|
|||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) { |
|||
x, err = p.DecodeVarint() |
|||
if err != nil { |
|||
return |
|||
} |
|||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) |
|||
return |
|||
} |
|||
|
|||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
|||
// bytes, embedded messages
|
|||
|
|||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
|||
// This is the format used for the bytes protocol buffer
|
|||
// type and for embedded messages.
|
|||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { |
|||
n, err := p.DecodeVarint() |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
nb := int(n) |
|||
if nb < 0 { |
|||
return nil, fmt.Errorf("proto: bad byte length %d", nb) |
|||
} |
|||
end := p.index + nb |
|||
if end < p.index || end > len(p.buf) { |
|||
return nil, io.ErrUnexpectedEOF |
|||
} |
|||
|
|||
if !alloc { |
|||
// todo: check if can get more uses of alloc=false
|
|||
buf = p.buf[p.index:end] |
|||
p.index += nb |
|||
return |
|||
} |
|||
|
|||
buf = make([]byte, nb) |
|||
copy(buf, p.buf[p.index:]) |
|||
p.index += nb |
|||
return |
|||
} |
|||
|
|||
// DecodeStringBytes reads an encoded string from the Buffer.
|
|||
// This is the format used for the proto2 string type.
|
|||
func (p *Buffer) DecodeStringBytes() (s string, err error) { |
|||
buf, err := p.DecodeRawBytes(false) |
|||
if err != nil { |
|||
return |
|||
} |
|||
return string(buf), nil |
|||
} |
|||
|
|||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
|||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
|||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
|||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { |
|||
oi := o.index |
|||
|
|||
err := o.skip(t, tag, wire) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
if !unrecField.IsValid() { |
|||
return nil |
|||
} |
|||
|
|||
ptr := structPointer_Bytes(base, unrecField) |
|||
|
|||
// Add the skipped field to struct field
|
|||
obuf := o.buf |
|||
|
|||
o.buf = *ptr |
|||
o.EncodeVarint(uint64(tag<<3 | wire)) |
|||
*ptr = append(o.buf, obuf[oi:o.index]...) |
|||
|
|||
o.buf = obuf |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
|||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error { |
|||
|
|||
var u uint64 |
|||
var err error |
|||
|
|||
switch wire { |
|||
case WireVarint: |
|||
_, err = o.DecodeVarint() |
|||
case WireFixed64: |
|||
_, err = o.DecodeFixed64() |
|||
case WireBytes: |
|||
_, err = o.DecodeRawBytes(false) |
|||
case WireFixed32: |
|||
_, err = o.DecodeFixed32() |
|||
case WireStartGroup: |
|||
for { |
|||
u, err = o.DecodeVarint() |
|||
if err != nil { |
|||
break |
|||
} |
|||
fwire := int(u & 0x7) |
|||
if fwire == WireEndGroup { |
|||
break |
|||
} |
|||
ftag := int(u >> 3) |
|||
err = o.skip(t, ftag, fwire) |
|||
if err != nil { |
|||
break |
|||
} |
|||
} |
|||
default: |
|||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
	// Unmarshal decodes the wire-format data in the argument into the
	// receiver, replacing any existing contents.
	Unmarshal([]byte) error
}
|||
|
|||
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
	// Clear pb so decoding starts from the zero message.
	pb.Reset()
	return UnmarshalMerge(buf, pb)
}
|||
|
|||
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
	// If the object can unmarshal itself, let it.
	if u, ok := pb.(Unmarshaler); ok {
		return u.Unmarshal(buf)
	}
	// Otherwise decode reflectively via a fresh Buffer over buf.
	return NewBuffer(buf).Unmarshal(pb)
}
|||
|
|||
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
	// Read the length-prefixed payload; alloc=false is safe because the
	// nested Buffer below only reads from it.
	enc, err := p.DecodeRawBytes(false)
	if err != nil {
		return err
	}
	return NewBuffer(enc).Unmarshal(pb)
}
|||
|
|||
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
	typ, base, err := getbase(pb)
	if err != nil {
		return err
	}
	// is_group=true makes unmarshalType stop at the end-group tag instead
	// of the end of the buffer.
	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
|||
|
|||
// Unmarshal parses the protocol buffer representation in the
|
|||
// Buffer and places the decoded result in pb. If the struct
|
|||
// underlying pb does not match the data in the buffer, the results can be
|
|||
// unpredictable.
|
|||
//
|
|||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
|||
func (p *Buffer) Unmarshal(pb Message) error { |
|||
// If the object can unmarshal itself, let it.
|
|||
if u, ok := pb.(Unmarshaler); ok { |
|||
err := u.Unmarshal(p.buf[p.index:]) |
|||
p.index = len(p.buf) |
|||
return err |
|||
} |
|||
|
|||
typ, base, err := getbase(pb) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) |
|||
|
|||
if collectStats { |
|||
stats.Decode++ |
|||
} |
|||
|
|||
return err |
|||
} |
|||
|
|||
// unmarshalType does the work of unmarshaling a structure.
// It decodes tag/value pairs from o until the buffer is exhausted (or, for
// groups, until the end-group tag), dispatching each field to its decoder
// from prop, and tracks required fields so a missing one can be reported.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
	var state errorState
	// required counts still-unseen required fields; reqFields is a bitmap
	// of required tags 1-64 already decoded (see below).
	required, reqFields := prop.reqCount, uint64(0)

	var err error
	for err == nil && o.index < len(o.buf) {
		// oi marks the start of this tag/value pair so the raw bytes can
		// be re-read for extensions.
		oi := o.index
		var u uint64
		u, err = o.DecodeVarint()
		if err != nil {
			break
		}
		// A field key is (tag << 3) | wire-type.
		wire := int(u & 0x7)
		if wire == WireEndGroup {
			if is_group {
				if required > 0 {
					// Not enough information to determine the exact field.
					// (See below.)
					return &RequiredNotSetError{"{Unknown}"}
				}
				return nil // input is satisfied
			}
			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
		}
		tag := int(u >> 3)
		if tag <= 0 {
			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
		}
		fieldnum, ok := prop.decoderTags.get(tag)
		if !ok {
			// Maybe it's an extension?
			if prop.extendable {
				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
					// Store the raw bytes (key included) in the extension map.
					if err = o.skip(st, tag, wire); err == nil {
						extmap := e.extensionsWrite()
						ext := extmap[int32(tag)] // may be missing
						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
						extmap[int32(tag)] = ext
					}
					continue
				}
			}
			// Maybe it's a oneof?
			if prop.oneofUnmarshaler != nil {
				m := structPointer_Interface(base, st).(Message)
				// First return value indicates whether tag is a oneof field.
				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
				if err == ErrInternalBadWireType {
					// Map the error to something more descriptive.
					// Do the formatting here to save generated code space.
					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
				}
				if ok {
					continue
				}
			}
			// Unknown field: skip it, preserving it in XXX_unrecognized.
			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
			continue
		}
		p := prop.Prop[fieldnum]

		if p.dec == nil {
			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
			continue
		}
		dec := p.dec
		if wire != WireStartGroup && wire != p.WireType {
			if wire == WireBytes && p.packedDec != nil {
				// a packable field
				dec = p.packedDec
			} else {
				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
				continue
			}
		}
		decErr := dec(o, p, base)
		if decErr != nil && !state.shouldContinue(decErr, p) {
			err = decErr
		}
		if err == nil && p.Required {
			// Successfully decoded a required field.
			if tag <= 64 {
				// use bitmap for fields 1-64 to catch field reuse.
				var mask uint64 = 1 << uint64(tag-1)
				if reqFields&mask == 0 {
					// new required field
					reqFields |= mask
					required--
				}
			} else {
				// This is imprecise. It can be fooled by a required field
				// with a tag > 64 that is encoded twice; that's very rare.
				// A fully correct implementation would require allocating
				// a data structure, which we would like to avoid.
				required--
			}
		}
	}
	if err == nil {
		if is_group {
			// A group must be terminated by an end-group tag, not EOF.
			return io.ErrUnexpectedEOF
		}
		if state.err != nil {
			return state.err
		}
		if required > 0 {
			// Not enough information to determine the exact field. If we use extra
			// CPU, we could determine the field only if the missing required field
			// has a tag <= 64 and we check reqFields.
			return &RequiredNotSetError{"{Unknown}"}
		}
	}
	return err
}
|||
|
|||
// Individual type decoders
|
|||
// For each,
|
|||
// u is the decoded value,
|
|||
// v is a pointer to the field (pointer) in the struct
|
|||
|
|||
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
	boolPoolSize   = 16 // 16 x 1 byte
	uint32PoolSize = 8  // 8 x 4 bytes
	uint64PoolSize = 4  // 4 x 8 bytes
)
|||
|
|||
// Decode a bool.
|
|||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if len(o.bools) == 0 { |
|||
o.bools = make([]bool, boolPoolSize) |
|||
} |
|||
o.bools[0] = u != 0 |
|||
*structPointer_Bool(base, p.field) = &o.bools[0] |
|||
o.bools = o.bools[1:] |
|||
return nil |
|||
} |
|||
|
|||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*structPointer_BoolVal(base, p.field) = u != 0 |
|||
return nil |
|||
} |
|||
|
|||
// Decode an int32.
|
|||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) |
|||
return nil |
|||
} |
|||
|
|||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) |
|||
return nil |
|||
} |
|||
|
|||
// Decode an int64.
|
|||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
word64_Set(structPointer_Word64(base, p.field), o, u) |
|||
return nil |
|||
} |
|||
|
|||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a string.
|
|||
func (o *Buffer) dec_string(p *Properties, base structPointer) error { |
|||
s, err := o.DecodeStringBytes() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*structPointer_String(base, p.field) = &s |
|||
return nil |
|||
} |
|||
|
|||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { |
|||
s, err := o.DecodeStringBytes() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*structPointer_StringVal(base, p.field) = s |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of bytes ([]byte).
|
|||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { |
|||
b, err := o.DecodeRawBytes(true) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*structPointer_Bytes(base, p.field) = b |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of bools ([]bool).
|
|||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
v := structPointer_BoolSlice(base, p.field) |
|||
*v = append(*v, u != 0) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of bools ([]bool) in packed format.
|
|||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { |
|||
v := structPointer_BoolSlice(base, p.field) |
|||
|
|||
nn, err := o.DecodeVarint() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
nb := int(nn) // number of bytes of encoded bools
|
|||
fin := o.index + nb |
|||
if fin < o.index { |
|||
return errOverflow |
|||
} |
|||
|
|||
y := *v |
|||
for o.index < fin { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
y = append(y, u != 0) |
|||
} |
|||
|
|||
*v = y |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of int32s ([]int32).
|
|||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
structPointer_Word32Slice(base, p.field).Append(uint32(u)) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of int32s ([]int32) in packed format.
|
|||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { |
|||
v := structPointer_Word32Slice(base, p.field) |
|||
|
|||
nn, err := o.DecodeVarint() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
nb := int(nn) // number of bytes of encoded int32s
|
|||
|
|||
fin := o.index + nb |
|||
if fin < o.index { |
|||
return errOverflow |
|||
} |
|||
for o.index < fin { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
v.Append(uint32(u)) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of int64s ([]int64).
|
|||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
structPointer_Word64Slice(base, p.field).Append(u) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of int64s ([]int64) in packed format.
|
|||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { |
|||
v := structPointer_Word64Slice(base, p.field) |
|||
|
|||
nn, err := o.DecodeVarint() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
nb := int(nn) // number of bytes of encoded int64s
|
|||
|
|||
fin := o.index + nb |
|||
if fin < o.index { |
|||
return errOverflow |
|||
} |
|||
for o.index < fin { |
|||
u, err := p.valDec(o) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
v.Append(u) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of strings ([]string).
|
|||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { |
|||
s, err := o.DecodeStringBytes() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
v := structPointer_StringSlice(base, p.field) |
|||
*v = append(*v, s) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a slice of slice of bytes ([][]byte).
|
|||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { |
|||
b, err := o.DecodeRawBytes(true) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
v := structPointer_BytesSlice(base, p.field) |
|||
*v = append(*v, b) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a map field.
|
|||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { |
|||
raw, err := o.DecodeRawBytes(false) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
oi := o.index // index at the end of this map entry
|
|||
o.index -= len(raw) // move buffer back to start of map entry
|
|||
|
|||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
|||
if mptr.Elem().IsNil() { |
|||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) |
|||
} |
|||
v := mptr.Elem() // map[K]V
|
|||
|
|||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
|||
// See enc_new_map for why.
|
|||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
|||
keybase := toStructPointer(keyptr.Addr()) // **K
|
|||
|
|||
var valbase structPointer |
|||
var valptr reflect.Value |
|||
switch p.mtype.Elem().Kind() { |
|||
case reflect.Slice: |
|||
// []byte
|
|||
var dummy []byte |
|||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
|||
valbase = toStructPointer(valptr) // *[]byte
|
|||
case reflect.Ptr: |
|||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
|||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|||
valptr.Set(reflect.New(valptr.Type().Elem())) |
|||
valbase = toStructPointer(valptr) |
|||
default: |
|||
// everything else
|
|||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|||
valbase = toStructPointer(valptr.Addr()) // **V
|
|||
} |
|||
|
|||
// Decode.
|
|||
// This parses a restricted wire format, namely the encoding of a message
|
|||
// with two fields. See enc_new_map for the format.
|
|||
for o.index < oi { |
|||
// tagcode for key and value properties are always a single byte
|
|||
// because they have tags 1 and 2.
|
|||
tagcode := o.buf[o.index] |
|||
o.index++ |
|||
switch tagcode { |
|||
case p.mkeyprop.tagcode[0]: |
|||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { |
|||
return err |
|||
} |
|||
case p.mvalprop.tagcode[0]: |
|||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { |
|||
return err |
|||
} |
|||
default: |
|||
// TODO: Should we silently skip this instead?
|
|||
return fmt.Errorf("proto: bad map data tag %d", raw[0]) |
|||
} |
|||
} |
|||
keyelem, valelem := keyptr.Elem(), valptr.Elem() |
|||
if !keyelem.IsValid() { |
|||
keyelem = reflect.Zero(p.mtype.Key()) |
|||
} |
|||
if !valelem.IsValid() { |
|||
valelem = reflect.Zero(p.mtype.Elem()) |
|||
} |
|||
|
|||
v.SetMapIndex(keyelem, valelem) |
|||
return nil |
|||
} |
|||
|
|||
// Decode a group.
|
|||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { |
|||
bas := structPointer_GetStructPointer(base, p.field) |
|||
if structPointer_IsNil(bas) { |
|||
// allocate new nested message
|
|||
bas = toStructPointer(reflect.New(p.stype)) |
|||
structPointer_SetStructPointer(base, p.field, bas) |
|||
} |
|||
return o.unmarshalType(p.stype, p.sprop, true, bas) |
|||
} |
|||
|
|||
// Decode an embedded message.
|
|||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { |
|||
raw, e := o.DecodeRawBytes(false) |
|||
if e != nil { |
|||
return e |
|||
} |
|||
|
|||
bas := structPointer_GetStructPointer(base, p.field) |
|||
if structPointer_IsNil(bas) { |
|||
// allocate new nested message
|
|||
bas = toStructPointer(reflect.New(p.stype)) |
|||
structPointer_SetStructPointer(base, p.field, bas) |
|||
} |
|||
|
|||
// If the object can unmarshal itself, let it.
|
|||
if p.isUnmarshaler { |
|||
iv := structPointer_Interface(bas, p.stype) |
|||
return iv.(Unmarshaler).Unmarshal(raw) |
|||
} |
|||
|
|||
obuf := o.buf |
|||
oi := o.index |
|||
o.buf = raw |
|||
o.index = 0 |
|||
|
|||
err = o.unmarshalType(p.stype, p.sprop, false, bas) |
|||
o.buf = obuf |
|||
o.index = oi |
|||
|
|||
return err |
|||
} |
|||
|
|||
// Decode a slice of embedded messages.
// Thin wrapper over dec_slice_struct with is_group=false (length-delimited).
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
	return o.dec_slice_struct(p, false, base)
}
|||
|
|||
// Decode a slice of embedded groups.
// Thin wrapper over dec_slice_struct with is_group=true (tag-delimited).
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
	return o.dec_slice_struct(p, true, base)
}
|||
|
|||
// Decode a slice of structs ([]*struct).
|
|||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { |
|||
v := reflect.New(p.stype) |
|||
bas := toStructPointer(v) |
|||
structPointer_StructPointerSlice(base, p.field).Append(bas) |
|||
|
|||
if is_group { |
|||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas) |
|||
return err |
|||
} |
|||
|
|||
raw, err := o.DecodeRawBytes(false) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
// If the object can unmarshal itself, let it.
|
|||
if p.isUnmarshaler { |
|||
iv := v.Interface() |
|||
return iv.(Unmarshaler).Unmarshal(raw) |
|||
} |
|||
|
|||
obuf := o.buf |
|||
oi := o.index |
|||
o.buf = raw |
|||
o.index = 0 |
|||
|
|||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas) |
|||
|
|||
o.buf = obuf |
|||
o.index = oi |
|||
|
|||
return err |
|||
} |
|||
@ -0,0 +1,256 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"fmt" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
tpb "github.com/golang/protobuf/proto/proto3_proto" |
|||
) |
|||
|
|||
var (
	// bytesBlackhole and msgBlackhole are benchmark sinks that keep the
	// compiler from optimizing away encode/decode results.
	bytesBlackhole []byte
	msgBlackhole   = new(tpb.Message)
)
|||
|
|||
// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
|
|||
// 2 bytes long).
|
|||
func BenchmarkVarint32ArraySmall(b *testing.B) { |
|||
for i := uint(1); i <= 10; i++ { |
|||
dist := genInt32Dist([7]int{0, 3, 1}, 1<<i) |
|||
raw, err := proto.Marshal(&tpb.Message{ |
|||
ShortKey: dist, |
|||
}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) { |
|||
scratchBuf := proto.NewBuffer(nil) |
|||
b.ResetTimer() |
|||
for k := 0; k < b.N; k++ { |
|||
scratchBuf.SetBuf(raw) |
|||
msgBlackhole.Reset() |
|||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkVarint32ArrayLarge shows the performance on an array of large int32 fields (3 and
|
|||
// 4 bytes long, with a small number of 1, 2, 5 and 10 byte long versions).
|
|||
func BenchmarkVarint32ArrayLarge(b *testing.B) { |
|||
for i := uint(1); i <= 10; i++ { |
|||
dist := genInt32Dist([7]int{0, 1, 2, 4, 8, 1, 1}, 1<<i) |
|||
raw, err := proto.Marshal(&tpb.Message{ |
|||
ShortKey: dist, |
|||
}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) { |
|||
scratchBuf := proto.NewBuffer(nil) |
|||
b.ResetTimer() |
|||
for k := 0; k < b.N; k++ { |
|||
scratchBuf.SetBuf(raw) |
|||
msgBlackhole.Reset() |
|||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkVarint64ArraySmall shows the performance on an array of small int64 fields (1 and
|
|||
// 2 bytes long).
|
|||
func BenchmarkVarint64ArraySmall(b *testing.B) { |
|||
for i := uint(1); i <= 10; i++ { |
|||
dist := genUint64Dist([11]int{0, 3, 1}, 1<<i) |
|||
raw, err := proto.Marshal(&tpb.Message{ |
|||
Key: dist, |
|||
}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) { |
|||
scratchBuf := proto.NewBuffer(nil) |
|||
b.ResetTimer() |
|||
for k := 0; k < b.N; k++ { |
|||
scratchBuf.SetBuf(raw) |
|||
msgBlackhole.Reset() |
|||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkVarint64ArrayLarge shows the performance on an array of large int64 fields (6, 7,
|
|||
// and 8 bytes long with a small number of the other sizes).
|
|||
func BenchmarkVarint64ArrayLarge(b *testing.B) { |
|||
for i := uint(1); i <= 10; i++ { |
|||
dist := genUint64Dist([11]int{0, 1, 1, 2, 4, 8, 16, 32, 16, 1, 1}, 1<<i) |
|||
raw, err := proto.Marshal(&tpb.Message{ |
|||
Key: dist, |
|||
}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.Run(fmt.Sprintf("Len%v", len(dist)), func(b *testing.B) { |
|||
scratchBuf := proto.NewBuffer(nil) |
|||
b.ResetTimer() |
|||
for k := 0; k < b.N; k++ { |
|||
scratchBuf.SetBuf(raw) |
|||
msgBlackhole.Reset() |
|||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each
|
|||
// containing a small number of large (3, 4, and 5 byte) repeated int64s.
|
|||
func BenchmarkVarint64ArrayMixed(b *testing.B) { |
|||
for i := uint(1); i <= 1<<5; i <<= 1 { |
|||
dist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i)) |
|||
// number of sub fields
|
|||
for k := uint(1); k <= 1<<10; k <<= 2 { |
|||
msg := &tpb.Message{} |
|||
for m := uint(0); m < k; m++ { |
|||
msg.Children = append(msg.Children, &tpb.Message{ |
|||
Key: dist, |
|||
}) |
|||
} |
|||
raw, err := proto.Marshal(msg) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.Run(fmt.Sprintf("Fields%vLen%v", k, i), func(b *testing.B) { |
|||
scratchBuf := proto.NewBuffer(nil) |
|||
b.ResetTimer() |
|||
for k := 0; k < b.N; k++ { |
|||
scratchBuf.SetBuf(raw) |
|||
msgBlackhole.Reset() |
|||
if err := scratchBuf.Unmarshal(msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
}) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// genInt32Dist generates a slice of ints that will match the size distribution of dist.
// A size of 6 corresponds to a max length varint32, which is 10 bytes.  The distribution
// is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
func genInt32Dist(dist [7]int, count int) (dest []int32) {
	// values[k] is a representative int32 whose varint encoding is k bytes
	// (index 0 is unused by convention but yields 0).
	values := [7]int32{0, 1<<7 - 1, 1<<14 - 1, 1<<21 - 1, 1<<28 - 1, 1<<29 - 1, -1}
	for i := 0; i < count; i++ {
		for k, reps := range dist {
			for m := 0; m < reps; m++ {
				dest = append(dest, values[k])
			}
		}
	}
	return
}
|||
|
|||
// genUint64Dist generates a slice of ints that will match the size distribution of dist.
// The distribution is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).
func genUint64Dist(dist [11]int, count int) (dest []uint64) {
	// values[k] is a representative uint64 whose varint encoding is k bytes
	// (index 0 is unused by convention but yields 0).
	values := [11]uint64{
		0,
		1<<7 - 1,
		1<<14 - 1,
		1<<21 - 1,
		1<<28 - 1,
		1<<35 - 1,
		1<<42 - 1,
		1<<49 - 1,
		1<<56 - 1,
		1<<63 - 1,
		1<<64 - 1,
	}
	for i := 0; i < count; i++ {
		for k, reps := range dist {
			for m := 0; m < reps; m++ {
				dest = append(dest, values[k])
			}
		}
	}
	return
}
|||
|
|||
// BenchmarkDecodeEmpty measures the overhead of doing the minimal possible decode.
|
|||
func BenchmarkDecodeEmpty(b *testing.B) { |
|||
raw, err := proto.Marshal(&tpb.Message{}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
b.ResetTimer() |
|||
for i := 0; i < b.N; i++ { |
|||
if err := proto.Unmarshal(raw, msgBlackhole); err != nil { |
|||
b.Error("wrong decode", err) |
|||
} |
|||
} |
|||
} |
|||
1355
vendor/src/github.com/golang/protobuf/proto/encode.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,83 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"strconv" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
tpb "github.com/golang/protobuf/proto/proto3_proto" |
|||
"github.com/golang/protobuf/ptypes" |
|||
) |
|||
|
|||
var (
	// blackhole receives marshaling results so the compiler cannot
	// eliminate the benchmarked Marshal calls as dead code.
	blackhole []byte
)
|||
|
|||
// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
// same.
func BenchmarkAny(b *testing.B) {
	data := make([]byte, 1<<20)
	quantum := 1 << 10
	// One sub-benchmark per payload size: 1 KiB, 2 KiB, ..., 1 MiB.
	for i := uint(0); i <= 10; i++ {
		b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {
			for k := 0; k < b.N; k++ {
				inner := &tpb.Message{
					Data: data[:quantum<<i],
				}
				// Wrap the inner message in an Any, then marshal the wrapper.
				outer, err := ptypes.MarshalAny(inner)
				if err != nil {
					b.Error("wrong encode", err)
				}
				raw, err := proto.Marshal(&tpb.Message{
					Anything: outer,
				})
				if err != nil {
					b.Error("wrong encode", err)
				}
				// Sink the result so the encode cannot be optimized away.
				blackhole = raw
			}
		})
	}
}
|||
|
|||
// BenchmarkEmpy measures the overhead of doing the minimal possible encode.
|
|||
func BenchmarkEmpy(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
raw, err := proto.Marshal(&tpb.Message{}) |
|||
if err != nil { |
|||
b.Error("wrong encode", err) |
|||
} |
|||
blackhole = raw |
|||
} |
|||
} |
|||
@ -0,0 +1,300 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
// Protocol buffer comparison.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"bytes" |
|||
"log" |
|||
"reflect" |
|||
"strings" |
|||
) |
|||
|
|||
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.

Equality is defined in this way:
  - Two messages are equal iff they are the same type,
    corresponding fields are equal, unknown field sets
    are equal, and extensions sets are equal.
  - Two set scalar fields are equal iff their values are equal.
    If the fields are of a floating-point type, remember that
    NaN != x for all x, including NaN. If the message is defined
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
	if a == nil || b == nil {
		// Only nil vs. nil is equal; nil vs. non-nil is not.
		return a == b
	}
	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
	if v1.Type() != v2.Type() {
		return false
	}
	if v1.Kind() == reflect.Ptr {
		// Typed nils: equal only if both sides are nil.
		if v1.IsNil() {
			return v2.IsNil()
		}
		if v2.IsNil() {
			return false
		}
		// Compare the pointed-to structs.
		v1, v2 = v1.Elem(), v2.Elem()
	}
	if v1.Kind() != reflect.Struct {
		return false
	}
	return equalStruct(v1, v2)
}
|||
|
|||
// equalStruct reports whether two message struct values are equal,
// comparing regular fields, extensions, and unknown fields.
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
	sprop := GetProperties(v1.Type())
	for i := 0; i < v1.NumField(); i++ {
		f := v1.Type().Field(i)
		// XXX_ fields (extensions, unknown bytes) are handled separately below.
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		f1, f2 := v1.Field(i), v2.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
				// both unset
				continue
			} else if n1 != n2 {
				// set/unset mismatch
				return false
			}
			b1, ok := f1.Interface().(raw)
			if ok {
				b2 := f2.Interface().(raw)
				// RawMessage: compare the raw encoded bytes directly.
				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
					return false
				}
				continue
			}
			// Both set: compare the pointed-to values.
			f1, f2 = f1.Elem(), f2.Elem()
		}
		if !equalAny(f1, f2, sprop.Prop[i]) {
			return false
		}
	}

	// New-style extension storage.
	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_InternalExtensions")
		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
			return false
		}
	}

	// Old-style (V1) extension storage.
	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_extensions")
		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
			return false
		}
	}

	uf := v1.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return true
	}

	// Unknown fields are equal iff their current encoded bytes are equal.
	u1 := uf.Bytes()
	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
	if !bytes.Equal(u1, u2) {
		return false
	}

	return true
}
|||
|
|||
// equalAny compares two single values of the same reflected kind,
// recursing for pointers, slices, maps, interfaces, and nested structs.
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
	if v1.Type() == protoMessageType {
		m1, _ := v1.Interface().(Message)
		m2, _ := v2.Interface().(Message)
		return Equal(m1, m2)
	}
	switch v1.Kind() {
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		return v1.Float() == v2.Float()
	case reflect.Int32, reflect.Int64:
		return v1.Int() == v2.Int()
	case reflect.Interface:
		// Probably a oneof field; compare the inner values.
		n1, n2 := v1.IsNil(), v2.IsNil()
		if n1 || n2 {
			return n1 == n2
		}
		e1, e2 := v1.Elem(), v2.Elem()
		if e1.Type() != e2.Type() {
			// Different oneof variants are never equal.
			return false
		}
		return equalAny(e1, e2, nil)
	case reflect.Map:
		if v1.Len() != v2.Len() {
			return false
		}
		for _, key := range v1.MapKeys() {
			val2 := v2.MapIndex(key)
			if !val2.IsValid() {
				// This key was not found in the second map.
				return false
			}
			if !equalAny(v1.MapIndex(key), val2, nil) {
				return false
			}
		}
		return true
	case reflect.Ptr:
		// Maps may have nil values in them, so check for nil.
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		return equalAny(v1.Elem(), v2.Elem(), prop)
	case reflect.Slice:
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			// short circuit: []byte

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value.
			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
				return true
			}
			// In proto2, nil and empty []byte are distinct.
			if v1.IsNil() != v2.IsNil() {
				return false
			}
			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
		}

		if v1.Len() != v2.Len() {
			return false
		}
		// Element-wise comparison for repeated fields.
		for i := 0; i < v1.Len(); i++ {
			if !equalAny(v1.Index(i), v2.Index(i), prop) {
				return false
			}
		}
		return true
	case reflect.String:
		return v1.Interface().(string) == v2.Interface().(string)
	case reflect.Struct:
		return equalStruct(v1, v2)
	case reflect.Uint32, reflect.Uint64:
		return v1.Uint() == v2.Uint()
	}

	// unknown type, so not a protocol buffer
	log.Printf("proto: don't know how to compare %v", v1)
	return false
}
|||
|
|||
// base is the struct type that the extensions are based on.
|
|||
// x1 and x2 are InternalExtensions.
|
|||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { |
|||
em1, _ := x1.extensionsRead() |
|||
em2, _ := x2.extensionsRead() |
|||
return equalExtMap(base, em1, em2) |
|||
} |
|||
|
|||
// equalExtMap reports whether two extension maps contain pairwise-equal
// extensions. Extensions that exist only in encoded form are decoded
// (when a descriptor is registered) before comparison.
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
	if len(em1) != len(em2) {
		return false
	}

	for extNum, e1 := range em1 {
		e2, ok := em2[extNum]
		if !ok {
			return false
		}

		m1, m2 := e1.value, e2.value

		if m1 != nil && m2 != nil {
			// Both are unencoded.
			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
				return false
			}
			continue
		}

		// At least one is encoded. To do a semantically correct comparison
		// we need to unmarshal them first.
		var desc *ExtensionDesc
		if m := extensionMaps[base]; m != nil {
			desc = m[extNum]
		}
		if desc == nil {
			// No registered descriptor: we cannot decode, so skip this
			// extension rather than report a spurious inequality.
			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
			continue
		}
		var err error
		if m1 == nil {
			m1, err = decodeExtension(e1.enc, desc)
		}
		if m2 == nil && err == nil {
			m2, err = decodeExtension(e2.enc, desc)
		}
		if err != nil {
			// The encoded form is invalid.
			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
			return false
		}
		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
			return false
		}
	}

	return true
}
|||
@ -0,0 +1,224 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
. "github.com/golang/protobuf/proto" |
|||
proto3pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
pb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
// Four identical base messages.
// The init function adds extensions to some of them.
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}

// Two messages with non-message extensions.
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
|||
|
|||
func init() { |
|||
ext1 := &pb.Ext{Data: String("Kirk")} |
|||
ext2 := &pb.Ext{Data: String("Picard")} |
|||
|
|||
// messageWithExtension1a has ext1, but never marshals it.
|
|||
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { |
|||
panic("SetExtension on 1a failed: " + err.Error()) |
|||
} |
|||
|
|||
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
|
|||
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { |
|||
panic("SetExtension on 1b failed: " + err.Error()) |
|||
} |
|||
buf, err := Marshal(messageWithExtension1b) |
|||
if err != nil { |
|||
panic("Marshal of 1b failed: " + err.Error()) |
|||
} |
|||
messageWithExtension1b.Reset() |
|||
if err := Unmarshal(buf, messageWithExtension1b); err != nil { |
|||
panic("Unmarshal of 1b failed: " + err.Error()) |
|||
} |
|||
|
|||
// messageWithExtension2 has ext2.
|
|||
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { |
|||
panic("SetExtension on 2 failed: " + err.Error()) |
|||
} |
|||
|
|||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { |
|||
panic("SetExtension on Int32-1 failed: " + err.Error()) |
|||
} |
|||
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { |
|||
panic("SetExtension on Int32-2 failed: " + err.Error()) |
|||
} |
|||
} |
|||
|
|||
// EqualTests is a table of message pairs and the expected result of
// Equal(a, b). It covers nils, scalar fields, repeated fields, nesting,
// bytes (proto2 and proto3 semantics), extensions, groups, maps, and oneofs.
var EqualTests = []struct {
	desc string
	a, b Message
	exp  bool
}{
	{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
	{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
	{"nil vs nil", nil, nil, true},
	{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
	{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
	{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},

	{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
	{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
	{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
	{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},

	{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
	{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
	{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
	{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
	{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
	{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
	{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},

	{
		"nested, different",
		&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
		&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
		false,
	},
	{
		"nested, equal",
		&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
		&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
		true,
	},

	{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
	{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
	{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
	{
		"repeated bytes",
		&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
		&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
		true,
	},
	// In proto3, []byte{} and []byte(nil) are equal.
	{"proto3 bytes, empty vs nil", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true},

	{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
	{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
	{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},

	{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
	{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},

	{
		"message with group",
		&pb.MyMessage{
			Count: Int32(1),
			Somegroup: &pb.MyMessage_SomeGroup{
				GroupField: Int32(5),
			},
		},
		&pb.MyMessage{
			Count: Int32(1),
			Somegroup: &pb.MyMessage_SomeGroup{
				GroupField: Int32(5),
			},
		},
		true,
	},

	{
		"map same",
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
		true,
	},
	{
		"map different entry",
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
		&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
		false,
	},
	{
		"map different key only",
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
		&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
		false,
	},
	{
		"map different value only",
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
		false,
	},
	{
		"zero-length maps same",
		&pb.MessageWithMap{NameMapping: map[int32]string{}},
		&pb.MessageWithMap{NameMapping: nil},
		true,
	},
	{
		"orders in map don't matter",
		&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken", 2: "Rob"}},
		&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob", 1: "Ken"}},
		true,
	},
	{
		"oneof same",
		&pb.Communique{Union: &pb.Communique_Number{41}},
		&pb.Communique{Union: &pb.Communique_Number{41}},
		true,
	},
	{
		"oneof one nil",
		&pb.Communique{Union: &pb.Communique_Number{41}},
		&pb.Communique{},
		false,
	},
	{
		"oneof different",
		&pb.Communique{Union: &pb.Communique_Number{41}},
		&pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}},
		false,
	},
}
|||
|
|||
func TestEqual(t *testing.T) { |
|||
for _, tc := range EqualTests { |
|||
if res := Equal(tc.a, tc.b); res != tc.exp { |
|||
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,586 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
/* |
|||
* Types and routines for supporting protocol buffer extensions. |
|||
*/ |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"reflect" |
|||
"strconv" |
|||
"sync" |
|||
) |
|||
|
|||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
|||
|
|||
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
	Start, End int32 // both inclusive
}
|
|||
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
	Message
	// ExtensionRangeArray returns the valid extension field-number ranges.
	ExtensionRangeArray() []ExtensionRange
	// extensionsWrite returns the extension map for mutation.
	extensionsWrite() map[int32]Extension
	// extensionsRead returns the extension map (possibly nil) and a locker
	// that must be held while reading it.
	extensionsRead() (map[int32]Extension, sync.Locker)
}
|||
|
|||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
	Message
	ExtensionRangeArray() []ExtensionRange
	// ExtensionMap exposes the old-style, directly-accessible extension map.
	ExtensionMap() map[int32]Extension
}
|||
|
|||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
	extendableProtoV1
}

// extensionsWrite satisfies extendableProto using the V1 map, which is
// always directly writable.
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
	return e.ExtensionMap()
}

// extensionsRead satisfies extendableProto; the V1 representation has no
// mutex, so a no-op locker is returned.
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
	return e.ExtensionMap(), notLocker{}
}
|||
|
|||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}

func (n notLocker) Lock()   {}
func (n notLocker) Unlock() {}
|||
|
|||
// extendable returns the extendableProto interface for the given generated proto message.
|
|||
// If the proto message has the old extension format, it returns a wrapper that implements
|
|||
// the extendableProto interface.
|
|||
func extendable(p interface{}) (extendableProto, bool) { |
|||
if ep, ok := p.(extendableProto); ok { |
|||
return ep, ok |
|||
} |
|||
if ep, ok := p.(extendableProtoV1); ok { |
|||
return extensionAdapter{ep}, ok |
|||
} |
|||
return nil, false |
|||
} |
|||
|
|||
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
	// The struct must be indirect so that if a user inadvertently copies a
	// generated message and its embedded XXX_InternalExtensions, they
	// avoid the mayhem of a copied mutex.
	//
	// The mutex serializes all logically read-only operations to p.extensionMap.
	// It is up to the client to ensure that write operations to p.extensionMap are
	// mutually exclusive with other accesses.
	p *struct {
		mu           sync.Mutex
		extensionMap map[int32]Extension
	}
}
|||
|
|||
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
	if e.p == nil {
		// Lazily allocate the indirect storage and its map.
		e.p = new(struct {
			mu           sync.Mutex
			extensionMap map[int32]Extension
		})
		e.p.extensionMap = make(map[int32]Extension)
	}
	return e.p.extensionMap
}
|||
|
|||
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing Elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
	if e.p == nil {
		// Never written to: no map, and no locker either.
		return nil, nil
	}
	return e.p.extensionMap, &e.p.mu
}
|||
|
|||
// reflect.Types of the two extendable-message interfaces, for use in
// reflection-based checks elsewhere in the package.
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
|||
|
|||
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
	ExtendedType  Message     // nil pointer to the type that is being extended
	ExtensionType interface{} // nil pointer to the extension type
	Field         int32       // field number
	Name          string      // fully-qualified name of extension, for text formatting
	Tag           string      // protobuf tag style
}
|||
|
|||
func (ed *ExtensionDesc) repeated() bool { |
|||
t := reflect.TypeOf(ed.ExtensionType) |
|||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 |
|||
} |
|||
|
|||
// Extension represents an extension in a message.
type Extension struct {
	// When an extension is stored in a message using SetExtension
	// only desc and value are set. When the message is marshaled
	// enc will be set to the encoded form of the message.
	//
	// When a message is unmarshaled and contains extensions, each
	// extension will have only enc set. When such an extension is
	// accessed using GetExtension (or GetExtensions) desc and value
	// will be set.
	desc  *ExtensionDesc
	value interface{}
	enc   []byte
}
|||
|
|||
// SetRawExtension is for testing only.
|
|||
func SetRawExtension(base Message, id int32, b []byte) { |
|||
epb, ok := extendable(base) |
|||
if !ok { |
|||
return |
|||
} |
|||
extmap := epb.extensionsWrite() |
|||
extmap[id] = Extension{enc: b} |
|||
} |
|||
|
|||
// isExtensionField returns true iff the given field number is in an extension range.
|
|||
func isExtensionField(pb extendableProto, field int32) bool { |
|||
for _, er := range pb.ExtensionRangeArray() { |
|||
if er.Start <= field && field <= er.End { |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// checkExtensionTypes checks that the given extension is valid for pb:
// the descriptor's extended type must match pb's concrete type, and the
// field number must fall within one of pb's declared extension ranges.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
	var pbi interface{} = pb
	// Check the extended type.
	// Unwrap the adapter so the comparison sees the V1 message's own type.
	if ea, ok := pbi.(extensionAdapter); ok {
		pbi = ea.extendableProtoV1
	}
	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
	}
	// Check the range.
	if !isExtensionField(pb, extension.Field) {
		return errors.New("proto: bad extension number; not in declared ranges")
	}
	return nil
}
|||
|
|||
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
	base  reflect.Type
	field int32
}

// extProp caches Properties computed for extensions, keyed by
// (extended type, field number) and guarded by the embedded RWMutex.
var extProp = struct {
	sync.RWMutex
	m map[extPropKey]*Properties
}{
	m: make(map[extPropKey]*Properties),
}
|||
|
|||
// extensionProperties returns the (possibly cached) Properties for the
// given extension descriptor, computing and caching them on first use.
func extensionProperties(ed *ExtensionDesc) *Properties {
	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}

	// Fast path: read-lock lookup in the cache.
	extProp.RLock()
	if prop, ok := extProp.m[key]; ok {
		extProp.RUnlock()
		return prop
	}
	extProp.RUnlock()

	extProp.Lock()
	defer extProp.Unlock()
	// Check again.
	// Another goroutine may have filled the entry between the two locks.
	if prop, ok := extProp.m[key]; ok {
		return prop
	}

	prop := new(Properties)
	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
	extProp.m[key] = prop
	return prop
}
|||
|
|||
// encodeExtensions encodes any unmarshaled (unencoded) extensions in e,
// holding e's lock for the duration of the map walk.
func encodeExtensions(e *XXX_InternalExtensions) error {
	m, mu := e.extensionsRead()
	if m == nil {
		return nil // fast path
	}
	mu.Lock()
	defer mu.Unlock()
	return encodeExtensionsMap(m)
}
|||
|
|||
// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m,
// storing the encoded bytes back into each entry.
func encodeExtensionsMap(m map[int32]Extension) error {
	for k, e := range m {
		if e.value == nil || e.desc == nil {
			// Extension is only in its encoded form.
			continue
		}

		// We don't skip extensions that have an encoded form set,
		// because the extension value may have been mutated after
		// the last time this function was called.

		et := reflect.TypeOf(e.desc.ExtensionType)
		props := extensionProperties(e.desc)

		p := NewBuffer(nil)
		// If e.value has type T, the encoder expects a *struct{ X T }.
		// Pass a *T with a zero field and hope it all works out.
		x := reflect.New(et)
		x.Elem().Set(reflect.ValueOf(e.value))
		if err := props.enc(p, props, toStructPointer(x)); err != nil {
			return err
		}
		// e is a copy of the map entry; write it back with enc filled in.
		e.enc = p.buf
		m[k] = e
	}
	return nil
}
|||
|
|||
func extensionsSize(e *XXX_InternalExtensions) (n int) { |
|||
m, mu := e.extensionsRead() |
|||
if m == nil { |
|||
return 0 |
|||
} |
|||
mu.Lock() |
|||
defer mu.Unlock() |
|||
return extensionsMapSize(m) |
|||
} |
|||
|
|||
// extensionsMapSize returns the total encoded size of the extensions in m.
// It mirrors encodeExtensionsMap: already-encoded extensions contribute
// len(enc); decoded values are sized via their Properties.
func extensionsMapSize(m map[int32]Extension) (n int) {
	for _, e := range m {
		if e.value == nil || e.desc == nil {
			// Extension is only in its encoded form.
			n += len(e.enc)
			continue
		}

		// We don't skip extensions that have an encoded form set,
		// because the extension value may have been mutated after
		// the last time this function was called.

		et := reflect.TypeOf(e.desc.ExtensionType)
		props := extensionProperties(e.desc)

		// If e.value has type T, the encoder expects a *struct{ X T }.
		// Pass a *T with a zero field and hope it all works out.
		x := reflect.New(et)
		x.Elem().Set(reflect.ValueOf(e.value))
		n += props.size(props, toStructPointer(x))
	}
	return
}
|||
|
|||
// HasExtension returns whether the given extension is present in pb.
|
|||
func HasExtension(pb Message, extension *ExtensionDesc) bool { |
|||
// TODO: Check types, field numbers, etc.?
|
|||
epb, ok := extendable(pb) |
|||
if !ok { |
|||
return false |
|||
} |
|||
extmap, mu := epb.extensionsRead() |
|||
if extmap == nil { |
|||
return false |
|||
} |
|||
mu.Lock() |
|||
_, ok = extmap[extension.Field] |
|||
mu.Unlock() |
|||
return ok |
|||
} |
|||
|
|||
// ClearExtension removes the given extension from pb.
|
|||
func ClearExtension(pb Message, extension *ExtensionDesc) { |
|||
epb, ok := extendable(pb) |
|||
if !ok { |
|||
return |
|||
} |
|||
// TODO: Check types, field numbers, etc.?
|
|||
extmap := epb.extensionsWrite() |
|||
delete(extmap, extension.Field) |
|||
} |
|||
|
|||
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
	epb, ok := extendable(pb)
	if !ok {
		return nil, errors.New("proto: not an extendable proto")
	}

	if err := checkExtensionTypes(epb, extension); err != nil {
		return nil, err
	}

	emap, mu := epb.extensionsRead()
	if emap == nil {
		// No extensions stored; fall back to the declared default, if any.
		return defaultExtensionValue(extension)
	}
	mu.Lock()
	defer mu.Unlock()
	e, ok := emap[extension.Field]
	if !ok {
		// defaultExtensionValue returns the default value or
		// ErrMissingExtension if there is no default.
		return defaultExtensionValue(extension)
	}

	if e.value != nil {
		// Already decoded. Check the descriptor, though.
		if e.desc != extension {
			// This shouldn't happen. If it does, it means that
			// GetExtension was called twice with two different
			// descriptors with the same field number.
			return nil, errors.New("proto: descriptor conflict")
		}
		return e.value, nil
	}

	v, err := decodeExtension(e.enc, extension)
	if err != nil {
		return nil, err
	}

	// Remember the decoded version and drop the encoded version.
	// That way it is safe to mutate what we return.
	e.value = v
	e.desc = extension
	e.enc = nil
	emap[extension.Field] = e
	return e.value, nil
}
|||
|
|||
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
	t := reflect.TypeOf(extension.ExtensionType)
	props := extensionProperties(extension)

	// fieldDefault derives the default from the extension's tag/properties.
	sf, _, err := fieldDefault(t, props)
	if err != nil {
		return nil, err
	}

	if sf == nil || sf.value == nil {
		// There is no default value.
		return nil, ErrMissingExtension
	}

	if t.Kind() != reflect.Ptr {
		// We do not need to return a Ptr, we can directly return sf.value.
		return sf.value, nil
	}

	// We need to return an interface{} that is a pointer to sf.value.
	value := reflect.New(t).Elem()
	value.Set(reflect.New(value.Type().Elem()))
	if sf.kind == reflect.Int32 {
		// We may have an int32 or an enum, but the underlying data is int32.
		// Since we can't set an int32 into a non int32 reflect.value directly
		// set it as a int32.
		value.Elem().SetInt(int64(sf.value.(int32)))
	} else {
		value.Elem().Set(reflect.ValueOf(sf.value))
	}
	return value.Interface(), nil
}
|||
|
|||
// decodeExtension decodes an extension encoded in b, returning a value
// of the extension's declared Go type.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
	o := NewBuffer(b)

	t := reflect.TypeOf(extension.ExtensionType)

	props := extensionProperties(extension)

	// t is a pointer to a struct, pointer to basic type or a slice.
	// Allocate a "field" to store the pointer/slice itself; the
	// pointer/slice will be stored here. We pass
	// the address of this field to props.dec.
	// This passes a zero field and a *t and lets props.dec
	// interpret it as a *struct{ x t }.
	value := reflect.New(t).Elem()

	// b may contain several occurrences of the same field (e.g. a
	// repeated extension, or merged non-repeated ones); decode them all.
	for {
		// Discard wire type and field number varint. It isn't needed.
		if _, err := o.DecodeVarint(); err != nil {
			return nil, err
		}

		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
			return nil, err
		}

		if o.index >= len(o.buf) {
			break
		}
	}
	return value.Interface(), nil
}
|||
|
|||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
|||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
|||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { |
|||
epb, ok := extendable(pb) |
|||
if !ok { |
|||
return nil, errors.New("proto: not an extendable proto") |
|||
} |
|||
extensions = make([]interface{}, len(es)) |
|||
for i, e := range es { |
|||
extensions[i], err = GetExtension(epb, e) |
|||
if err == ErrMissingExtension { |
|||
err = nil |
|||
} |
|||
if err != nil { |
|||
return |
|||
} |
|||
} |
|||
return |
|||
} |
|||
|
|||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
	epb, ok := extendable(pb)
	if !ok {
		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
	}
	registeredExtensions := RegisteredExtensions(pb)

	emap, mu := epb.extensionsRead()
	if emap == nil {
		// No extensions stored at all.
		return nil, nil
	}
	mu.Lock()
	defer mu.Unlock()
	extensions := make([]*ExtensionDesc, 0, len(emap))
	for extid, e := range emap {
		// Prefer the descriptor stored with the value; fall back to the
		// global registry, then to a stub carrying only the field number.
		desc := e.desc
		if desc == nil {
			desc = registeredExtensions[extid]
			if desc == nil {
				desc = &ExtensionDesc{Field: extid}
			}
		}

		extensions = append(extensions, desc)
	}
	return extensions, nil
}
|||
|
|||
// SetExtension sets the specified extension of pb to the specified value.
// The value must have exactly the extension's declared Go type and must not be nil.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
	epb, ok := extendable(pb)
	if !ok {
		return errors.New("proto: not an extendable proto")
	}
	if err := checkExtensionTypes(epb, extension); err != nil {
		return err
	}
	typ := reflect.TypeOf(extension.ExtensionType)
	if typ != reflect.TypeOf(value) {
		return errors.New("proto: bad extension value type")
	}
	// nil extension values need to be caught early, because the
	// encoder can't distinguish an ErrNil due to a nil extension
	// from an ErrNil due to a missing field. Extensions are
	// always optional, so the encoder would just swallow the error
	// and drop all the extensions from the encoded message.
	if reflect.ValueOf(value).IsNil() {
		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
	}

	extmap := epb.extensionsWrite()
	extmap[extension.Field] = Extension{desc: extension, value: value}
	return nil
}
|||
|
|||
// ClearAllExtensions clears all extensions from pb.
|
|||
func ClearAllExtensions(pb Message) { |
|||
epb, ok := extendable(pb) |
|||
if !ok { |
|||
return |
|||
} |
|||
m := epb.extensionsWrite() |
|||
for k := range m { |
|||
delete(m, k) |
|||
} |
|||
} |
|||
|
|||
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

// extensionMaps maps an extended message's struct type to the extensions
// registered against it, keyed by field number.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|||
|
|||
// RegisterExtension is called from the generated code.
|
|||
func RegisterExtension(desc *ExtensionDesc) { |
|||
st := reflect.TypeOf(desc.ExtendedType).Elem() |
|||
m := extensionMaps[st] |
|||
if m == nil { |
|||
m = make(map[int32]*ExtensionDesc) |
|||
extensionMaps[st] = m |
|||
} |
|||
if _, ok := m[desc.Field]; ok { |
|||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) |
|||
} |
|||
m[desc.Field] = desc |
|||
} |
|||
|
|||
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
	// Key the registry by the struct type, i.e. the pointee of pb's type.
	return extensionMaps[reflect.TypeOf(pb).Elem()]
}
|||
@ -0,0 +1,508 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"bytes" |
|||
"fmt" |
|||
"reflect" |
|||
"sort" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
pb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
// TestGetExtensionsWithMissingExtensions verifies that GetExtensions returns
// a set extension in its slot and nil (with no error) for an unset one.
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
	msg := &pb.MyMessage{}
	ext1 := &pb.Ext{}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
		t.Fatalf("Could not set ext1: %s", err)
	}
	exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
		pb.E_Ext_More,
		pb.E_Ext_Text, // never set; should come back as nil
	})
	if err != nil {
		t.Fatalf("GetExtensions() failed: %s", err)
	}
	if exts[0] != ext1 {
		t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
	}
	if exts[1] != nil {
		t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
	}
}
|||
|
|||
// TestExtensionDescsWithMissingExtensions verifies that ExtensionDescs
// returns the full descriptor for a registered extension and a stub
// descriptor (Field only) for one that survives a marshal/unmarshal
// round trip without being registered.
func TestExtensionDescsWithMissingExtensions(t *testing.T) {
	msg := &pb.MyMessage{Count: proto.Int32(0)}
	extdesc1 := pb.E_Ext_More
	if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
		t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
	}

	ext1 := &pb.Ext{}
	if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
		t.Fatalf("Could not set ext1: %s", err)
	}
	// extdesc2 is deliberately NOT registered with the proto package.
	extdesc2 := &proto.ExtensionDesc{
		ExtendedType:  (*pb.MyMessage)(nil),
		ExtensionType: (*bool)(nil),
		Field:         123456789,
		Name:          "a.b",
		Tag:           "varint,123456789,opt",
	}
	ext2 := proto.Bool(false)
	if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
		t.Fatalf("Could not set ext2: %s", err)
	}

	// Round-trip so the unregistered extension is held only in encoded form.
	b, err := proto.Marshal(msg)
	if err != nil {
		t.Fatalf("Could not marshal msg: %v", err)
	}
	if err := proto.Unmarshal(b, msg); err != nil {
		t.Fatalf("Could not unmarshal into msg: %v", err)
	}

	descs, err := proto.ExtensionDescs(msg)
	if err != nil {
		t.Fatalf("proto.ExtensionDescs: got error %v", err)
	}
	sortExtDescs(descs) // order is undefined; sort for a deterministic compare
	wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
	if !reflect.DeepEqual(descs, wantDescs) {
		t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
	}
}
|||
|
|||
// ExtensionDescSlice implements sort.Interface, ordering descriptors
// by field number.
type ExtensionDescSlice []*proto.ExtensionDesc

func (s ExtensionDescSlice) Len() int           { return len(s) }
func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
func (s ExtensionDescSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

// sortExtDescs sorts s in place by ascending field number.
func sortExtDescs(s []*proto.ExtensionDesc) {
	sort.Sort(ExtensionDescSlice(s))
}
|||
|
|||
func TestGetExtensionStability(t *testing.T) { |
|||
check := func(m *pb.MyMessage) bool { |
|||
ext1, err := proto.GetExtension(m, pb.E_Ext_More) |
|||
if err != nil { |
|||
t.Fatalf("GetExtension() failed: %s", err) |
|||
} |
|||
ext2, err := proto.GetExtension(m, pb.E_Ext_More) |
|||
if err != nil { |
|||
t.Fatalf("GetExtension() failed: %s", err) |
|||
} |
|||
return ext1 == ext2 |
|||
} |
|||
msg := &pb.MyMessage{Count: proto.Int32(4)} |
|||
ext0 := &pb.Ext{} |
|||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { |
|||
t.Fatalf("Could not set ext1: %s", ext0) |
|||
} |
|||
if !check(msg) { |
|||
t.Errorf("GetExtension() not stable before marshaling") |
|||
} |
|||
bb, err := proto.Marshal(msg) |
|||
if err != nil { |
|||
t.Fatalf("Marshal() failed: %s", err) |
|||
} |
|||
msg1 := &pb.MyMessage{} |
|||
err = proto.Unmarshal(bb, msg1) |
|||
if err != nil { |
|||
t.Fatalf("Unmarshal() failed: %s", err) |
|||
} |
|||
if !check(msg1) { |
|||
t.Errorf("GetExtension() not stable after unmarshaling") |
|||
} |
|||
} |
|||
|
|||
func TestGetExtensionDefaults(t *testing.T) { |
|||
var setFloat64 float64 = 1 |
|||
var setFloat32 float32 = 2 |
|||
var setInt32 int32 = 3 |
|||
var setInt64 int64 = 4 |
|||
var setUint32 uint32 = 5 |
|||
var setUint64 uint64 = 6 |
|||
var setBool = true |
|||
var setBool2 = false |
|||
var setString = "Goodnight string" |
|||
var setBytes = []byte("Goodnight bytes") |
|||
var setEnum = pb.DefaultsMessage_TWO |
|||
|
|||
type testcase struct { |
|||
ext *proto.ExtensionDesc // Extension we are testing.
|
|||
want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
|
|||
def interface{} // Expected value of extension after ClearExtension().
|
|||
} |
|||
tests := []testcase{ |
|||
{pb.E_NoDefaultDouble, setFloat64, nil}, |
|||
{pb.E_NoDefaultFloat, setFloat32, nil}, |
|||
{pb.E_NoDefaultInt32, setInt32, nil}, |
|||
{pb.E_NoDefaultInt64, setInt64, nil}, |
|||
{pb.E_NoDefaultUint32, setUint32, nil}, |
|||
{pb.E_NoDefaultUint64, setUint64, nil}, |
|||
{pb.E_NoDefaultSint32, setInt32, nil}, |
|||
{pb.E_NoDefaultSint64, setInt64, nil}, |
|||
{pb.E_NoDefaultFixed32, setUint32, nil}, |
|||
{pb.E_NoDefaultFixed64, setUint64, nil}, |
|||
{pb.E_NoDefaultSfixed32, setInt32, nil}, |
|||
{pb.E_NoDefaultSfixed64, setInt64, nil}, |
|||
{pb.E_NoDefaultBool, setBool, nil}, |
|||
{pb.E_NoDefaultBool, setBool2, nil}, |
|||
{pb.E_NoDefaultString, setString, nil}, |
|||
{pb.E_NoDefaultBytes, setBytes, nil}, |
|||
{pb.E_NoDefaultEnum, setEnum, nil}, |
|||
{pb.E_DefaultDouble, setFloat64, float64(3.1415)}, |
|||
{pb.E_DefaultFloat, setFloat32, float32(3.14)}, |
|||
{pb.E_DefaultInt32, setInt32, int32(42)}, |
|||
{pb.E_DefaultInt64, setInt64, int64(43)}, |
|||
{pb.E_DefaultUint32, setUint32, uint32(44)}, |
|||
{pb.E_DefaultUint64, setUint64, uint64(45)}, |
|||
{pb.E_DefaultSint32, setInt32, int32(46)}, |
|||
{pb.E_DefaultSint64, setInt64, int64(47)}, |
|||
{pb.E_DefaultFixed32, setUint32, uint32(48)}, |
|||
{pb.E_DefaultFixed64, setUint64, uint64(49)}, |
|||
{pb.E_DefaultSfixed32, setInt32, int32(50)}, |
|||
{pb.E_DefaultSfixed64, setInt64, int64(51)}, |
|||
{pb.E_DefaultBool, setBool, true}, |
|||
{pb.E_DefaultBool, setBool2, true}, |
|||
{pb.E_DefaultString, setString, "Hello, string"}, |
|||
{pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, |
|||
{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, |
|||
} |
|||
|
|||
checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error { |
|||
val, err := proto.GetExtension(msg, test.ext) |
|||
if err != nil { |
|||
if valWant != nil { |
|||
return fmt.Errorf("GetExtension(): %s", err) |
|||
} |
|||
if want := proto.ErrMissingExtension; err != want { |
|||
return fmt.Errorf("Unexpected error: got %v, want %v", err, want) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// All proto2 extension values are either a pointer to a value or a slice of values.
|
|||
ty := reflect.TypeOf(val) |
|||
tyWant := reflect.TypeOf(test.ext.ExtensionType) |
|||
if got, want := ty, tyWant; got != want { |
|||
return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want) |
|||
} |
|||
tye := ty.Elem() |
|||
tyeWant := tyWant.Elem() |
|||
if got, want := tye, tyeWant; got != want { |
|||
return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want) |
|||
} |
|||
|
|||
// Check the name of the type of the value.
|
|||
// If it is an enum it will be type int32 with the name of the enum.
|
|||
if got, want := tye.Name(), tye.Name(); got != want { |
|||
return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want) |
|||
} |
|||
|
|||
// Check that value is what we expect.
|
|||
// If we have a pointer in val, get the value it points to.
|
|||
valExp := val |
|||
if ty.Kind() == reflect.Ptr { |
|||
valExp = reflect.ValueOf(val).Elem().Interface() |
|||
} |
|||
if got, want := valExp, valWant; !reflect.DeepEqual(got, want) { |
|||
return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want) |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
setTo := func(test testcase) interface{} { |
|||
setTo := reflect.ValueOf(test.want) |
|||
if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr { |
|||
setTo = reflect.New(typ).Elem() |
|||
setTo.Set(reflect.New(setTo.Type().Elem())) |
|||
setTo.Elem().Set(reflect.ValueOf(test.want)) |
|||
} |
|||
return setTo.Interface() |
|||
} |
|||
|
|||
for _, test := range tests { |
|||
msg := &pb.DefaultsMessage{} |
|||
name := test.ext.Name |
|||
|
|||
// Check the initial value.
|
|||
if err := checkVal(test, msg, test.def); err != nil { |
|||
t.Errorf("%s: %v", name, err) |
|||
} |
|||
|
|||
// Set the per-type value and check value.
|
|||
name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want) |
|||
if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil { |
|||
t.Errorf("%s: SetExtension(): %v", name, err) |
|||
continue |
|||
} |
|||
if err := checkVal(test, msg, test.want); err != nil { |
|||
t.Errorf("%s: %v", name, err) |
|||
continue |
|||
} |
|||
|
|||
// Set and check the value.
|
|||
name += " (cleared)" |
|||
proto.ClearExtension(msg, test.ext) |
|||
if err := checkVal(test, msg, test.def); err != nil { |
|||
t.Errorf("%s: %v", name, err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestExtensionsRoundTrip(t *testing.T) { |
|||
msg := &pb.MyMessage{} |
|||
ext1 := &pb.Ext{ |
|||
Data: proto.String("hi"), |
|||
} |
|||
ext2 := &pb.Ext{ |
|||
Data: proto.String("there"), |
|||
} |
|||
exists := proto.HasExtension(msg, pb.E_Ext_More) |
|||
if exists { |
|||
t.Error("Extension More present unexpectedly") |
|||
} |
|||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { |
|||
t.Error(err) |
|||
} |
|||
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { |
|||
t.Error(err) |
|||
} |
|||
e, err := proto.GetExtension(msg, pb.E_Ext_More) |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
x, ok := e.(*pb.Ext) |
|||
if !ok { |
|||
t.Errorf("e has type %T, expected testdata.Ext", e) |
|||
} else if *x.Data != "there" { |
|||
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) |
|||
} |
|||
proto.ClearExtension(msg, pb.E_Ext_More) |
|||
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { |
|||
t.Errorf("got %v, expected ErrMissingExtension", e) |
|||
} |
|||
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { |
|||
t.Error("expected bad extension error, got nil") |
|||
} |
|||
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { |
|||
t.Error("expected extension err") |
|||
} |
|||
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { |
|||
t.Error("expected some sort of type mismatch error, got nil") |
|||
} |
|||
} |
|||
|
|||
// TestNilExtension verifies that SetExtension rejects a typed-nil
// extension value with a descriptive error.
func TestNilExtension(t *testing.T) {
	msg := &pb.MyMessage{
		Count: proto.Int32(1),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
		t.Fatal(err)
	}
	// (*pb.Ext)(nil) is a non-nil interface holding a nil pointer;
	// SetExtension must catch it.
	if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
		t.Error("expected SetExtension to fail due to a nil extension")
	} else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
		t.Errorf("expected error %v, got %v", want, err)
	}
	// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
	// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
}
|||
|
|||
// TestMarshalUnmarshalRepeatedExtension round-trips a repeated extension
// through Marshal/Unmarshal and checks the slice survives intact.
func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
	// Add a repeated extension to the result.
	tests := []struct {
		name string
		ext  []*pb.ComplexExtension
	}{
		{
			"two fields",
			[]*pb.ComplexExtension{
				{First: proto.Int32(7)},
				{Second: proto.Int32(11)},
			},
		},
		{
			"repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{Third: []int32{2000}},
			},
		},
		{
			"two fields and repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{First: proto.Int32(9)},
				{Second: proto.Int32(21)},
				{Third: []int32{2000}},
			},
		},
	}
	for _, test := range tests {
		// Marshal message with a repeated extension.
		msg1 := new(pb.OtherMessage)
		err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
		if err != nil {
			t.Fatalf("[%s] Error setting extension: %v", test.name, err)
		}
		b, err := proto.Marshal(msg1)
		if err != nil {
			t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
		}

		// Unmarshal and read the merged proto.
		msg2 := new(pb.OtherMessage)
		err = proto.Unmarshal(b, msg2)
		if err != nil {
			t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
		}
		e, err := proto.GetExtension(msg2, pb.E_RComplex)
		if err != nil {
			t.Fatalf("[%s] Error getting extension: %v", test.name, err)
		}
		ext := e.([]*pb.ComplexExtension)
		if ext == nil {
			t.Fatalf("[%s] Invalid extension", test.name)
		}
		if !reflect.DeepEqual(ext, test.ext) {
			t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
		}
	}
}
|||
|
|||
// TestUnmarshalRepeatingNonRepeatedExtension checks that repeated wire
// occurrences of a non-repeated extension are merged on unmarshal.
func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
	// We may see multiple instances of the same extension in the wire
	// format. For example, the proto compiler may encode custom options in
	// this way. Here, we verify that we merge the extensions together.
	tests := []struct {
		name string
		ext  []*pb.ComplexExtension
	}{
		{
			"two fields",
			[]*pb.ComplexExtension{
				{First: proto.Int32(7)},
				{Second: proto.Int32(11)},
			},
		},
		{
			"repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{Third: []int32{2000}},
			},
		},
		{
			"two fields and repeated field",
			[]*pb.ComplexExtension{
				{Third: []int32{1000}},
				{First: proto.Int32(9)},
				{Second: proto.Int32(21)},
				{Third: []int32{2000}},
			},
		},
	}
	for _, test := range tests {
		var buf bytes.Buffer
		var want pb.ComplexExtension

		// Generate a serialized representation of a repeated extension
		// by catenating bytes together.
		for i, e := range test.ext {
			// Merge to create the wanted proto.
			proto.Merge(&want, e)

			// serialize the message
			msg := new(pb.OtherMessage)
			err := proto.SetExtension(msg, pb.E_Complex, e)
			if err != nil {
				t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
			}
			b, err := proto.Marshal(msg)
			if err != nil {
				t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
			}
			buf.Write(b)
		}

		// Unmarshal and read the merged proto.
		msg2 := new(pb.OtherMessage)
		err := proto.Unmarshal(buf.Bytes(), msg2)
		if err != nil {
			t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
		}
		e, err := proto.GetExtension(msg2, pb.E_Complex)
		if err != nil {
			t.Fatalf("[%s] Error getting extension: %v", test.name, err)
		}
		ext := e.(*pb.ComplexExtension)
		if ext == nil {
			t.Fatalf("[%s] Invalid extension", test.name)
		}
		if !reflect.DeepEqual(*ext, want) {
			t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
		}
	}
}
|||
|
|||
// TestClearAllExtensions checks that ClearAllExtensions removes an
// extension that was previously set (using an unregistered descriptor).
func TestClearAllExtensions(t *testing.T) {
	// unregistered extension
	desc := &proto.ExtensionDesc{
		ExtendedType:  (*pb.MyMessage)(nil),
		ExtensionType: (*bool)(nil),
		Field:         101010100,
		Name:          "emptyextension",
		Tag:           "varint,0,opt",
	}
	m := &pb.MyMessage{}
	if proto.HasExtension(m, desc) {
		t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
	}
	if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
		t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
	}
	if !proto.HasExtension(m, desc) {
		t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
	}
	proto.ClearAllExtensions(m)
	if proto.HasExtension(m, desc) {
		t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
	}
}
|||
@ -0,0 +1,898 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
/* |
|||
Package proto converts data structures to and from the wire format of |
|||
protocol buffers. It works in concert with the Go source code generated |
|||
for .proto files by the protocol compiler. |
|||
|
|||
A summary of the properties of the protocol buffer interface |
|||
for a protocol buffer variable v: |
|||
|
|||
- Names are turned from camel_case to CamelCase for export. |
|||
- There are no methods on v to set fields; just treat |
|||
them as structure fields. |
|||
- There are getters that return a field's value if set, |
|||
and return the field's default value if unset. |
|||
The getters work even if the receiver is a nil message. |
|||
- The zero value for a struct is its correct initialization state. |
|||
All desired fields must be set before marshaling. |
|||
- A Reset() method will restore a protobuf struct to its zero state. |
|||
- Non-repeated fields are pointers to the values; nil means unset. |
|||
That is, optional or required field int32 f becomes F *int32. |
|||
- Repeated fields are slices. |
|||
- Helper functions are available to aid the setting of fields. |
|||
msg.Foo = proto.String("hello") // set field
|
|||
- Constants are defined to hold the default values of all fields that |
|||
have them. They have the form Default_StructName_FieldName. |
|||
Because the getter methods handle defaulted values, |
|||
direct use of these constants should be rare. |
|||
- Enums are given type names and maps from names to values. |
|||
Enum values are prefixed by the enclosing message's name, or by the |
|||
enum's type name if it is a top-level enum. Enum types have a String |
|||
method, and a Enum method to assist in message construction. |
|||
- Nested messages, groups and enums have type names prefixed with the name of |
|||
the surrounding message type. |
|||
- Extensions are given descriptor names that start with E_, |
|||
followed by an underscore-delimited list of the nested messages |
|||
that contain it (if any) followed by the CamelCased name of the |
|||
extension field itself. HasExtension, ClearExtension, GetExtension |
|||
and SetExtension are functions for manipulating extensions. |
|||
- Oneof field sets are given a single field in their message, |
|||
with distinguished wrapper types for each possible field value. |
|||
- Marshal and Unmarshal are functions to encode and decode the wire format. |
|||
|
|||
When the .proto file specifies `syntax="proto3"`, there are some differences: |
|||
|
|||
- Non-repeated fields of non-message type are values instead of pointers. |
|||
- Getters are only generated for message and oneof fields. |
|||
- Enum types do not get an Enum method. |
|||
|
|||
The simplest way to describe this is to see an example. |
|||
Given file test.proto, containing |
|||
|
|||
package example; |
|||
|
|||
enum FOO { X = 17; } |
|||
|
|||
message Test { |
|||
required string label = 1; |
|||
optional int32 type = 2 [default=77]; |
|||
repeated int64 reps = 3; |
|||
optional group OptionalGroup = 4 { |
|||
required string RequiredField = 5; |
|||
} |
|||
oneof union { |
|||
int32 number = 6; |
|||
string name = 7; |
|||
} |
|||
} |
|||
|
|||
The resulting file, test.pb.go, is: |
|||
|
|||
package example |
|||
|
|||
import proto "github.com/golang/protobuf/proto" |
|||
import math "math" |
|||
|
|||
type FOO int32 |
|||
const ( |
|||
FOO_X FOO = 17 |
|||
) |
|||
var FOO_name = map[int32]string{ |
|||
17: "X", |
|||
} |
|||
var FOO_value = map[string]int32{ |
|||
"X": 17, |
|||
} |
|||
|
|||
func (x FOO) Enum() *FOO { |
|||
p := new(FOO) |
|||
*p = x |
|||
return p |
|||
} |
|||
func (x FOO) String() string { |
|||
return proto.EnumName(FOO_name, int32(x)) |
|||
} |
|||
func (x *FOO) UnmarshalJSON(data []byte) error { |
|||
value, err := proto.UnmarshalJSONEnum(FOO_value, data) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
*x = FOO(value) |
|||
return nil |
|||
} |
|||
|
|||
type Test struct { |
|||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` |
|||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` |
|||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` |
|||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` |
|||
// Types that are valid to be assigned to Union:
|
|||
// *Test_Number
|
|||
// *Test_Name
|
|||
Union isTest_Union `protobuf_oneof:"union"` |
|||
XXX_unrecognized []byte `json:"-"` |
|||
} |
|||
func (m *Test) Reset() { *m = Test{} } |
|||
func (m *Test) String() string { return proto.CompactTextString(m) } |
|||
func (*Test) ProtoMessage() {} |
|||
|
|||
type isTest_Union interface { |
|||
isTest_Union() |
|||
} |
|||
|
|||
type Test_Number struct { |
|||
Number int32 `protobuf:"varint,6,opt,name=number"` |
|||
} |
|||
type Test_Name struct { |
|||
Name string `protobuf:"bytes,7,opt,name=name"` |
|||
} |
|||
|
|||
func (*Test_Number) isTest_Union() {} |
|||
func (*Test_Name) isTest_Union() {} |
|||
|
|||
func (m *Test) GetUnion() isTest_Union { |
|||
if m != nil { |
|||
return m.Union |
|||
} |
|||
return nil |
|||
} |
|||
const Default_Test_Type int32 = 77 |
|||
|
|||
func (m *Test) GetLabel() string { |
|||
if m != nil && m.Label != nil { |
|||
return *m.Label |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *Test) GetType() int32 { |
|||
if m != nil && m.Type != nil { |
|||
return *m.Type |
|||
} |
|||
return Default_Test_Type |
|||
} |
|||
|
|||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup { |
|||
if m != nil { |
|||
return m.Optionalgroup |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
type Test_OptionalGroup struct { |
|||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` |
|||
} |
|||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } |
|||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } |
|||
|
|||
func (m *Test_OptionalGroup) GetRequiredField() string { |
|||
if m != nil && m.RequiredField != nil { |
|||
return *m.RequiredField |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *Test) GetNumber() int32 { |
|||
if x, ok := m.GetUnion().(*Test_Number); ok { |
|||
return x.Number |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (m *Test) GetName() string { |
|||
if x, ok := m.GetUnion().(*Test_Name); ok { |
|||
return x.Name |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func init() { |
|||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value) |
|||
} |
|||
|
|||
To create and play with a Test object: |
|||
|
|||
package main |
|||
|
|||
import ( |
|||
"log" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
pb "./example.pb" |
|||
) |
|||
|
|||
func main() { |
|||
test := &pb.Test{ |
|||
Label: proto.String("hello"), |
|||
Type: proto.Int32(17), |
|||
Reps: []int64{1, 2, 3}, |
|||
Optionalgroup: &pb.Test_OptionalGroup{ |
|||
RequiredField: proto.String("good bye"), |
|||
}, |
|||
Union: &pb.Test_Name{"fred"}, |
|||
} |
|||
data, err := proto.Marshal(test) |
|||
if err != nil { |
|||
log.Fatal("marshaling error: ", err) |
|||
} |
|||
newTest := &pb.Test{} |
|||
err = proto.Unmarshal(data, newTest) |
|||
if err != nil { |
|||
log.Fatal("unmarshaling error: ", err) |
|||
} |
|||
// Now test and newTest contain the same data.
|
|||
if test.GetLabel() != newTest.GetLabel() { |
|||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) |
|||
} |
|||
// Use a type switch to determine which oneof was set.
|
|||
switch u := test.Union.(type) { |
|||
case *pb.Test_Number: // u.Number contains the number.
|
|||
case *pb.Test_Name: // u.Name contains the string.
|
|||
} |
|||
// etc.
|
|||
} |
|||
*/ |
|||
package proto |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"log" |
|||
"reflect" |
|||
"sort" |
|||
"strconv" |
|||
"sync" |
|||
) |
|||
|
|||
// Message is implemented by generated protocol buffer messages.
type Message interface {
	// Reset restores the message to its zero state.
	Reset()
	// String returns a compact text representation of the message.
	String() string
	// ProtoMessage is a no-op marker method identifying generated messages.
	ProtoMessage()
}
|||
|
|||
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
	Emalloc uint64 // mallocs in encode
	Dmalloc uint64 // mallocs in decode
	Encode  uint64 // number of encodes
	Decode  uint64 // number of decodes
	Chit    uint64 // number of cache hits
	Cmiss   uint64 // number of cache misses
	Size    uint64 // number of sizes
}

// Set to true to enable stats collection.
const collectStats = false

// stats is the package-wide counter set returned by GetStats.
// NOTE(review): it is presumably only updated when collectStats is
// true — the updating code lives outside this file; confirm there.
var stats Stats

// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
|||
|
|||
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
	buf   []byte // encode/decode byte stream
	index int    // read point

	// pools of basic types to amortize allocation.
	bools   []bool
	uint32s []uint32
	uint64s []uint64

	// extra pools, only used with pointer_reflect.go
	int32s   []int32
	int64s   []int64
	float32s []float32
	float64s []float64
}

// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
	b := new(Buffer)
	b.buf = e
	return b
}

// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (b *Buffer) Reset() {
	b.buf = b.buf[:0] // truncate the stream, keeping its capacity
	b.index = 0       // rewind the read point
}

// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (b *Buffer) SetBuf(s []byte) {
	b.buf = s
	b.index = 0
}

// Bytes returns the contents of the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf }
|||
|
|||
/* |
|||
* Helper routines for simplifying the creation of optional fields of basic type. |
|||
*/ |
|||
|
|||
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
	p := new(bool)
	*p = v
	return p
}
|||
|
|||
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
	p := new(int32)
	*p = v
	return p
}
|||
|
|||
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
	// Consistent with the other helper routines in this file
	// (Bool, Int32, ...): convert once, return the escaping local.
	p := int32(v)
	return &p
}
|||
|
|||
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
	p := new(int64)
	*p = v
	return p
}
|||
|
|||
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
	p := new(float32)
	*p = v
	return p
}
|||
|
|||
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
	p := new(float64)
	*p = v
	return p
}
|||
|
|||
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
	p := new(uint32)
	*p = v
	return p
}
|||
|
|||
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
	p := new(uint64)
	*p = v
	return p
}
|||
|
|||
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
	p := new(string)
	*p = v
	return p
}
|||
|
|||
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
	if s, ok := m[v]; ok {
		return s
	}
	// Unknown value: fall back to its decimal representation.
	return strconv.Itoa(int(v))
}
|||
|
|||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if len(data) == 0 {
		// Guard the data[0] inspection below; an empty buffer
		// previously caused an index-out-of-range panic.
		return 0, fmt.Errorf("cannot unmarshal empty value into enum %s", enumName)
	}
	if data[0] == '"' {
		// New style: enums are strings.
		var repr string
		if err := json.Unmarshal(data, &repr); err != nil {
			return -1, err
		}
		val, ok := m[repr]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
		}
		return val, nil
	}
	// Old style: enums are ints.
	var val int32
	if err := json.Unmarshal(data, &val); err != nil {
		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
	}
	return val, nil
}
|||
|
|||
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
	var u uint64

	// Save the Buffer's state so it can be restored on return; the
	// method temporarily repurposes p to decode b.
	obuf := p.buf
	index := p.index
	p.buf = b
	p.index = 0
	depth := 0 // current group-nesting depth, used for indentation

	fmt.Printf("\n--- %s ---\n", s)

out:
	for {
		for i := 0; i < depth; i++ {
			fmt.Print("  ")
		}

		// Shadows the saved outer index on purpose: records where this
		// field's key starts, for the printed offset column.
		index := p.index
		if index == len(p.buf) {
			break
		}

		op, err := p.DecodeVarint()
		if err != nil {
			fmt.Printf("%3d: fetching op err %v\n", index, err)
			break out
		}
		// The key varint packs the field tag and the wire type.
		tag := op >> 3
		wire := op & 7

		switch wire {
		default:
			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
				index, tag, wire)
			break out

		case WireBytes:
			var r []byte

			r, err = p.DecodeRawBytes(false)
			if err != nil {
				break out
			}
			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
			// Print short payloads in full; elide the middle of long ones.
			if len(r) <= 6 {
				for i := 0; i < len(r); i++ {
					fmt.Printf(" %.2x", r[i])
				}
			} else {
				for i := 0; i < 3; i++ {
					fmt.Printf(" %.2x", r[i])
				}
				fmt.Printf(" ..")
				for i := len(r) - 3; i < len(r); i++ {
					fmt.Printf(" %.2x", r[i])
				}
			}
			fmt.Printf("\n")

		case WireFixed32:
			u, err = p.DecodeFixed32()
			if err != nil {
				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)

		case WireFixed64:
			u, err = p.DecodeFixed64()
			if err != nil {
				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)

		case WireVarint:
			u, err = p.DecodeVarint()
			if err != nil {
				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)

		case WireStartGroup:
			fmt.Printf("%3d: t=%3d start\n", index, tag)
			depth++

		case WireEndGroup:
			depth--
			fmt.Printf("%3d: t=%3d end\n", index, tag)
		}
	}

	if depth != 0 {
		// Mismatched group markers in the input.
		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
	}
	fmt.Printf("\n")

	// Restore the saved state.
	p.buf = obuf
	p.index = index
}
|||
|
|||
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
	// zeros=false: fields with no explicit proto default stay unset.
	// (recur is forwarded by setDefaults but not otherwise consulted.)
	setDefaults(reflect.ValueOf(pb), true, false)
}
|||
|
|||
// setDefaults fills in default values on the message *struct that v
// points to, then recurses into its non-nil sub-messages.
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
	v = v.Elem()

	// Fetch (or lazily build and cache) the per-type metadata that
	// records which fields carry defaults and which are nested messages.
	defaultMu.RLock()
	dm, ok := defaults[v.Type()]
	defaultMu.RUnlock()
	if !ok {
		dm = buildDefaultMessage(v.Type())
		defaultMu.Lock()
		defaults[v.Type()] = dm
		defaultMu.Unlock()
	}

	// Fill in unset scalar fields.
	for _, sf := range dm.scalars {
		f := v.Field(sf.index)
		if !f.IsNil() {
			// field already set
			continue
		}
		dv := sf.value
		if dv == nil && !zeros {
			// no explicit default, and don't want to set zeros
			continue
		}
		fptr := f.Addr().Interface() // **T
		// TODO: Consider batching the allocations we do here.
		switch sf.kind {
		case reflect.Bool:
			b := new(bool)
			if dv != nil {
				*b = dv.(bool)
			}
			*(fptr.(**bool)) = b
		case reflect.Float32:
			f := new(float32)
			if dv != nil {
				*f = dv.(float32)
			}
			*(fptr.(**float32)) = f
		case reflect.Float64:
			f := new(float64)
			if dv != nil {
				*f = dv.(float64)
			}
			*(fptr.(**float64)) = f
		case reflect.Int32:
			// might be an enum
			if ft := f.Type(); ft != int32PtrType {
				// enum: the field's pointer type is *EnumT, not *int32,
				// so it must be built via reflection.
				f.Set(reflect.New(ft.Elem()))
				if dv != nil {
					f.Elem().SetInt(int64(dv.(int32)))
				}
			} else {
				// int32 field
				i := new(int32)
				if dv != nil {
					*i = dv.(int32)
				}
				*(fptr.(**int32)) = i
			}
		case reflect.Int64:
			i := new(int64)
			if dv != nil {
				*i = dv.(int64)
			}
			*(fptr.(**int64)) = i
		case reflect.String:
			s := new(string)
			if dv != nil {
				*s = dv.(string)
			}
			*(fptr.(**string)) = s
		case reflect.Uint8:
			// exceptional case: []byte
			var b []byte
			if dv != nil {
				// Copy so the shared default bytes are never aliased.
				db := dv.([]byte)
				b = make([]byte, len(db))
				copy(b, db)
			} else {
				b = []byte{}
			}
			*(fptr.(*[]byte)) = b
		case reflect.Uint32:
			u := new(uint32)
			if dv != nil {
				*u = dv.(uint32)
			}
			*(fptr.(**uint32)) = u
		case reflect.Uint64:
			u := new(uint64)
			if dv != nil {
				*u = dv.(uint64)
			}
			*(fptr.(**uint64)) = u
		default:
			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
		}
	}

	// Recurse into non-nil nested messages.
	for _, ni := range dm.nested {
		f := v.Field(ni)
		// f is *T or []*T or map[T]*T
		switch f.Kind() {
		case reflect.Ptr:
			if f.IsNil() {
				continue
			}
			setDefaults(f, recur, zeros)

		case reflect.Slice:
			for i := 0; i < f.Len(); i++ {
				e := f.Index(i)
				if e.IsNil() {
					continue
				}
				setDefaults(e, recur, zeros)
			}

		case reflect.Map:
			for _, k := range f.MapKeys() {
				e := f.MapIndex(k)
				if e.IsNil() {
					continue
				}
				setDefaults(e, recur, zeros)
			}
		}
	}
}
|||
|
|||
var (
	// defaults maps a protocol buffer struct type to a slice of the fields,
	// with its scalar fields set to their proto-declared non-zero default values.
	defaultMu sync.RWMutex
	defaults  = make(map[reflect.Type]defaultMessage)

	// int32PtrType is the *int32 type, used by setDefaults to tell a
	// plain int32 field apart from an enum field (distinct pointer type).
	int32PtrType = reflect.TypeOf((*int32)(nil))
)
|||
|
|||
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
	scalars []scalarField
	nested  []int // struct field index of nested messages
}

// scalarField describes one scalar field that may carry a default.
type scalarField struct {
	index int          // struct field index
	kind  reflect.Kind // element type (the T in *T or []T)
	value interface{}  // the proto-declared default value, or nil
}
|||
|
|||
// buildDefaultMessage computes the defaultMessage metadata for a message
// type by classifying each of its fields via fieldDefault.
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
	sprop := GetProperties(t)
	for _, prop := range sprop.Prop {
		fi, ok := sprop.decoderTags.get(prop.Tag)
		if !ok {
			// XXX_unrecognized
			continue
		}
		ft := t.Field(fi).Type

		sf, nested, err := fieldDefault(ft, prop)
		switch {
		case err != nil:
			// Malformed default declarations are logged, not fatal.
			log.Print(err)
		case nested:
			dm.nested = append(dm.nested, fi)
		case sf != nil:
			sf.index = fi
			dm.scalars = append(dm.scalars, *sf)
		}
	}

	return dm
}
|||
|
|||
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
	// Classify the field shape: only pointer-to-scalar (proto2) and
	// []byte fields can carry defaults; pointer-to-struct shapes mark
	// nested messages.
	var canHaveDefault bool
	switch ft.Kind() {
	case reflect.Ptr:
		if ft.Elem().Kind() == reflect.Struct {
			nestedMessage = true
		} else {
			canHaveDefault = true // proto2 scalar field
		}

	case reflect.Slice:
		switch ft.Elem().Kind() {
		case reflect.Ptr:
			nestedMessage = true // repeated message
		case reflect.Uint8:
			canHaveDefault = true // bytes field
		}

	case reflect.Map:
		if ft.Elem().Kind() == reflect.Ptr {
			nestedMessage = true // map with message values
		}
	}

	if !canHaveDefault {
		if nestedMessage {
			return nil, true, nil
		}
		return nil, false, nil
	}

	// We now know that ft is a pointer or slice.
	sf = &scalarField{kind: ft.Elem().Kind()}

	// scalar fields without defaults
	if !prop.HasDefault {
		return sf, false, nil
	}

	// a scalar field: either *T or []byte
	// Parse the textual default declared in the struct tag into a typed value.
	switch ft.Elem().Kind() {
	case reflect.Bool:
		x, err := strconv.ParseBool(prop.Default)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
		}
		sf.value = x
	case reflect.Float32:
		x, err := strconv.ParseFloat(prop.Default, 32)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
		}
		sf.value = float32(x)
	case reflect.Float64:
		x, err := strconv.ParseFloat(prop.Default, 64)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
		}
		sf.value = x
	case reflect.Int32:
		x, err := strconv.ParseInt(prop.Default, 10, 32)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
		}
		sf.value = int32(x)
	case reflect.Int64:
		x, err := strconv.ParseInt(prop.Default, 10, 64)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
		}
		sf.value = x
	case reflect.String:
		sf.value = prop.Default
	case reflect.Uint8:
		// []byte (not *uint8)
		sf.value = []byte(prop.Default)
	case reflect.Uint32:
		x, err := strconv.ParseUint(prop.Default, 10, 32)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
		}
		sf.value = uint32(x)
	case reflect.Uint64:
		x, err := strconv.ParseUint(prop.Default, 10, 64)
		if err != nil {
			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
		}
		sf.value = x
	default:
		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
	}

	return sf, false, nil
}
|||
|
|||
// Map fields may have key types of non-float scalars, strings and enums.
|
|||
// The easiest way to sort them in some deterministic order is to use fmt.
|
|||
// If this turns out to be inefficient we can always consider other options,
|
|||
// such as doing a Schwartzian transform.
|
|||
|
|||
func mapKeys(vs []reflect.Value) sort.Interface { |
|||
s := mapKeySorter{ |
|||
vs: vs, |
|||
// default Less function: textual comparison
|
|||
less: func(a, b reflect.Value) bool { |
|||
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) |
|||
}, |
|||
} |
|||
|
|||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
|
|||
// numeric keys are sorted numerically.
|
|||
if len(vs) == 0 { |
|||
return s |
|||
} |
|||
switch vs[0].Kind() { |
|||
case reflect.Int32, reflect.Int64: |
|||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } |
|||
case reflect.Uint32, reflect.Uint64: |
|||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } |
|||
} |
|||
|
|||
return s |
|||
} |
|||
|
|||
type mapKeySorter struct { |
|||
vs []reflect.Value |
|||
less func(a, b reflect.Value) bool |
|||
} |
|||
|
|||
func (s mapKeySorter) Len() int { return len(s.vs) } |
|||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } |
|||
func (s mapKeySorter) Less(i, j int) bool { |
|||
return s.less(s.vs[i], s.vs[j]) |
|||
} |
|||
|
|||
// isProto3Zero reports whether v is a zero proto3 value.
|
|||
func isProto3Zero(v reflect.Value) bool { |
|||
switch v.Kind() { |
|||
case reflect.Bool: |
|||
return !v.Bool() |
|||
case reflect.Int32, reflect.Int64: |
|||
return v.Int() == 0 |
|||
case reflect.Uint32, reflect.Uint64: |
|||
return v.Uint() == 0 |
|||
case reflect.Float32, reflect.Float64: |
|||
return v.Float() == 0 |
|||
case reflect.String: |
|||
return v.String() == "" |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true

// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
|||
@ -0,0 +1,311 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
/* |
|||
* Support for message sets. |
|||
*/ |
|||
|
|||
import ( |
|||
"bytes" |
|||
"encoding/json" |
|||
"errors" |
|||
"fmt" |
|||
"reflect" |
|||
"sort" |
|||
) |
|||
|
|||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|||
|
|||
// The first two types (_MessageSet_Item and messageSet)
|
|||
// model what the protocol compiler produces for the following protocol message:
|
|||
// message MessageSet {
|
|||
// repeated group Item = 1 {
|
|||
// required int32 type_id = 2;
|
|||
// required string message = 3;
|
|||
// };
|
|||
// }
|
|||
// That is the MessageSet wire format. We can't use a proto to generate these
|
|||
// because that would introduce a circular dependency between it and this package.
|
|||
|
|||
// _MessageSet_Item is one entry of a message set: the extension's message
// type ID plus its already-encoded payload bytes.
type _MessageSet_Item struct {
	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
	Message []byte `protobuf:"bytes,3,req,name=message"`
}
|||
|
|||
// messageSet models the MessageSet wire format: a repeated group of
// (type_id, message) items.
type messageSet struct {
	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
	XXX_unrecognized []byte
	// TODO: caching?
}

// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
|||
|
|||
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
	// MessageTypeId returns the message set type ID of the message.
	MessageTypeId() int32
}
|||
|
|||
func (ms *messageSet) find(pb Message) *_MessageSet_Item { |
|||
mti, ok := pb.(messageTypeIder) |
|||
if !ok { |
|||
return nil |
|||
} |
|||
id := mti.MessageTypeId() |
|||
for _, item := range ms.Item { |
|||
if *item.TypeId == id { |
|||
return item |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (ms *messageSet) Has(pb Message) bool { |
|||
if ms.find(pb) != nil { |
|||
return true |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// Unmarshal decodes the set's stored entry for pb's message type into pb.
// It is a no-op when the set has no entry for that type.
func (ms *messageSet) Unmarshal(pb Message) error {
	if item := ms.find(pb); item != nil {
		return Unmarshal(item.Message, pb)
	}
	if _, ok := pb.(messageTypeIder); !ok {
		return errNoMessageTypeID
	}
	return nil // TODO: return error instead?
}
|||
|
|||
// Marshal encodes pb and stores the bytes in the set, replacing any
// existing item with the same message type ID.
func (ms *messageSet) Marshal(pb Message) error {
	msg, err := Marshal(pb)
	if err != nil {
		return err
	}
	if item := ms.find(pb); item != nil {
		// reuse existing item
		item.Message = msg
		return nil
	}

	mti, ok := pb.(messageTypeIder)
	if !ok {
		return errNoMessageTypeID
	}

	mtid := mti.MessageTypeId()
	ms.Item = append(ms.Item, &_MessageSet_Item{
		TypeId:  &mtid,
		Message: msg,
	})
	return nil
}
|||
|
|||
// Reset, String and ProtoMessage implement the Message interface.
func (ms *messageSet) Reset() { *ms = messageSet{} }

func (ms *messageSet) String() string { return CompactTextString(ms) }

func (*messageSet) ProtoMessage() {}
|||
|
|||
// Support for the message_set_wire_format message option.
|
|||
|
|||
// skipVarint returns buf with its leading varint removed.
// It assumes buf begins with a well-formed varint (callers guarantee this).
func skipVarint(buf []byte) []byte {
	n := 0
	for buf[n]&0x80 != 0 { // continuation bit set: varint continues
		n++
	}
	return buf[n+1:]
}
|||
|
|||
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
	// Normalize both supported representations to a map, ensuring the
	// extensions' wire bytes (Extension.enc) are populated first.
	var m map[int32]Extension
	switch exts := exts.(type) {
	case *XXX_InternalExtensions:
		if err := encodeExtensions(exts); err != nil {
			return nil, err
		}
		m, _ = exts.extensionsRead()
	case map[int32]Extension:
		if err := encodeExtensionsMap(exts); err != nil {
			return nil, err
		}
		m = exts
	default:
		return nil, errors.New("proto: not an extension map")
	}

	// Sort extension IDs to provide a deterministic encoding.
	// See also enc_map in encode.go.
	ids := make([]int, 0, len(m))
	for id := range m {
		ids = append(ids, int(id))
	}
	sort.Ints(ids)

	ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
	for _, id := range ids {
		e := m[int32(id)]
		// Remove the wire type and field number varint, as well as the length varint.
		msg := skipVarint(skipVarint(e.enc))

		ms.Item = append(ms.Item, &_MessageSet_Item{
			TypeId:  Int32(int32(id)),
			Message: msg,
		})
	}
	return Marshal(ms)
}
|||
|
|||
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
	var m map[int32]Extension
	switch exts := exts.(type) {
	case *XXX_InternalExtensions:
		m = exts.extensionsWrite()
	case map[int32]Extension:
		m = exts
	default:
		return errors.New("proto: not an extension map")
	}

	// Decode the message set framing, then re-wrap each item as a
	// regular length-delimited extension field.
	ms := new(messageSet)
	if err := Unmarshal(buf, ms); err != nil {
		return err
	}
	for _, item := range ms.Item {
		id := *item.TypeId
		msg := item.Message

		// Restore wire type and field number varint, plus length varint.
		// Be careful to preserve duplicate items.
		b := EncodeVarint(uint64(id)<<3 | WireBytes)
		if ext, ok := m[id]; ok {
			// Existing data; rip off the tag and length varint
			// so we join the new data correctly.
			// We can assume that ext.enc is set because we are unmarshaling.
			o := ext.enc[len(b):]   // skip wire type and field number
			_, n := DecodeVarint(o) // calculate length of length varint
			o = o[n:]               // skip length varint
			msg = append(o, msg...) // join old data and new data
		}
		b = append(b, EncodeVarint(uint64(len(msg)))...)
		b = append(b, msg...)

		m[id] = Extension{enc: b}
	}
	return nil
}
|||
|
|||
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
|
|||
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
|
|||
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { |
|||
var m map[int32]Extension |
|||
switch exts := exts.(type) { |
|||
case *XXX_InternalExtensions: |
|||
m, _ = exts.extensionsRead() |
|||
case map[int32]Extension: |
|||
m = exts |
|||
default: |
|||
return nil, errors.New("proto: not an extension map") |
|||
} |
|||
var b bytes.Buffer |
|||
b.WriteByte('{') |
|||
|
|||
// Process the map in key order for deterministic output.
|
|||
ids := make([]int32, 0, len(m)) |
|||
for id := range m { |
|||
ids = append(ids, id) |
|||
} |
|||
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
|
|||
|
|||
for i, id := range ids { |
|||
ext := m[id] |
|||
if i > 0 { |
|||
b.WriteByte(',') |
|||
} |
|||
|
|||
msd, ok := messageSetMap[id] |
|||
if !ok { |
|||
// Unknown type; we can't render it, so skip it.
|
|||
continue |
|||
} |
|||
fmt.Fprintf(&b, `"[%s]":`, msd.name) |
|||
|
|||
x := ext.value |
|||
if x == nil { |
|||
x = reflect.New(msd.t.Elem()).Interface() |
|||
if err := Unmarshal(ext.enc, x.(Message)); err != nil { |
|||
return nil, err |
|||
} |
|||
} |
|||
d, err := json.Marshal(x) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
b.Write(d) |
|||
} |
|||
b.WriteByte('}') |
|||
return b.Bytes(), nil |
|||
} |
|||
|
|||
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
	// Common-case fast path: an absent or empty JSON object needs no work.
	switch {
	case len(buf) == 0, bytes.Equal(buf, []byte("{}")):
		return nil
	}

	// This is fairly tricky, and it's not clear that it is needed.
	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
|||
|
|||
// A global registry of types that can be used in a MessageSet.

var messageSetMap = make(map[int32]messageSetDesc)

// messageSetDesc describes one registered MessageSet extension type.
type messageSetDesc struct {
	t    reflect.Type // pointer to struct
	name string       // rendered as the "[name]" key by MarshalMessageSetJSON
}

// RegisterMessageSetType is called from the generated code.
// It records the concrete type and name for the given extension field number
// so the JSON marshaler can instantiate and label it.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
	messageSetMap[fieldNum] = messageSetDesc{
		t:    reflect.TypeOf(m),
		name: name,
	}
}
|||
@ -0,0 +1,66 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"bytes" |
|||
"testing" |
|||
) |
|||
|
|||
// TestUnmarshalMessageSetWithDuplicate checks that when a message-set payload
// carries two items with the same TypeId, UnmarshalMessageSet concatenates
// their bodies into a single extension entry rather than dropping one.
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
	// Check that a repeated message set entry will be concatenated.
	in := &messageSet{
		Item: []*_MessageSet_Item{
			{TypeId: Int32(12345), Message: []byte("hoo")},
			{TypeId: Int32(12345), Message: []byte("hah")},
		},
	}
	b, err := Marshal(in)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	t.Logf("Marshaled bytes: %q", b)

	// Round-trip through the message-set decoder into an extension map.
	var extensions XXX_InternalExtensions
	if err := UnmarshalMessageSet(b, &extensions); err != nil {
		t.Fatalf("UnmarshalMessageSet: %v", err)
	}
	ext, ok := extensions.p.extensionMap[12345]
	if !ok {
		t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
	}
	// Skip wire type/field number and length varints.
	got := skipVarint(skipVarint(ext.enc))
	if want := []byte("hoohah"); !bytes.Equal(got, want) {
		t.Errorf("Combined extension is %q, want %q", got, want)
	}
}
|||
@ -0,0 +1,484 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
// +build appengine js
|
|||
|
|||
// This file contains an implementation of proto field accesses using package reflect.
|
|||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
|||
// be used on App Engine.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"math" |
|||
"reflect" |
|||
) |
|||
|
|||
// A structPointer is a pointer to a struct.
// In this reflect-based implementation it wraps the reflect.Value of the pointer.
type structPointer struct {
	v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
// The type argument is unused here; the unsafe implementation needs it.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
	return p.v.Interface()
}
|||
|
|||
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }

// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
	// Special case: an extension map entry with a value of type T
	// passes a *T to the struct-handling code with a zero field,
	// expecting that it will be treated as equivalent to *struct{ X T },
	// which has the same memory layout. We have to handle that case
	// specially, because reflect will panic if we call FieldByIndex on a
	// non-struct.
	if f == nil {
		return p.v.Elem()
	}

	return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value —
// specifically, a pointer to the field (via Addr) boxed in an interface.
func structPointer_ifield(p structPointer, f field) interface{} {
	return structPointer_field(p, f).Addr().Interface()
}
|||
|
|||
// The accessors below return the address of a field of the stated type by
// type-asserting the interface produced by structPointer_ifield; a field of
// the wrong type panics via the type assertion.

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return structPointer_ifield(p, f).(*[]string)
}

// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return structPointer_ifield(p, f).(*map[int32]Extension)
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
// The typ argument is unused here; the unsafe implementation needs it.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
	return structPointer_field(p, f).Addr()
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
	return structPointerSlice{structPointer_field(p, f)}
}
|||
|
|||
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
	v reflect.Value
}

// Len returns the number of elements in the slice.
func (p structPointerSlice) Len() int { return p.v.Len() }

// Index returns the i'th element as a structPointer.
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }

// Append appends q to the slice.
func (p structPointerSlice) Append(q structPointer) {
	p.v.Set(reflect.Append(p.v, q.v))
}

// Canonical reflect.Types for the scalar element types handled by the
// word32/word64 helpers below; used to dispatch in the Set functions.
var (
	int32Type   = reflect.TypeOf(int32(0))
	uint32Type  = reflect.TypeOf(uint32(0))
	float32Type = reflect.TypeOf(float32(0))
	int64Type   = reflect.TypeOf(int64(0))
	uint64Type  = reflect.TypeOf(uint64(0))
	float64Type = reflect.TypeOf(float64(0))
)
|||
|
|||
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
	v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
	return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
// Words are drawn from the Buffer's per-type pools (o.int32s etc.) to
// amortize the cost of many small pointer allocations during decoding.
func word32_Set(p word32, o *Buffer, x uint32) {
	t := p.v.Type().Elem()
	switch t {
	case int32Type:
		if len(o.int32s) == 0 {
			o.int32s = make([]int32, uint32PoolSize)
		}
		o.int32s[0] = int32(x)
		p.v.Set(reflect.ValueOf(&o.int32s[0]))
		o.int32s = o.int32s[1:]
		return
	case uint32Type:
		if len(o.uint32s) == 0 {
			o.uint32s = make([]uint32, uint32PoolSize)
		}
		o.uint32s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
		o.uint32s = o.uint32s[1:]
		return
	case float32Type:
		if len(o.float32s) == 0 {
			o.float32s = make([]float32, uint32PoolSize)
		}
		o.float32s[0] = math.Float32frombits(x)
		p.v.Set(reflect.ValueOf(&o.float32s[0]))
		o.float32s = o.float32s[1:]
		return
	}

	// must be enum
	// Enums have a named int32 type, so allocate via reflect.New and
	// sign-extend x into it.
	p.v.Set(reflect.New(t))
	p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32{structPointer_field(p, f)}
}
|||
|
|||
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
	v reflect.Value
}

// Set sets *p to x, reinterpreting the bits according to the field's type.
func word32Val_Set(p word32Val, x uint32) {
	switch p.v.Type() {
	case int32Type:
		p.v.SetInt(int64(x))
		return
	case uint32Type:
		p.v.SetUint(uint64(x))
		return
	case float32Type:
		p.v.SetFloat(float64(math.Float32frombits(x)))
		return
	}

	// must be enum
	// Named int32 type: sign-extend x into it.
	p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val{structPointer_field(p, f)}
}
|||
|
|||
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
	v reflect.Value
}

// Append adds x (reinterpreted per the element kind) to the end of the slice.
func (p word32Slice) Append(x uint32) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		// Spare capacity: just extend the length.
		p.v.SetLen(n + 1)
	} else {
		// Full: grow by appending a zero element.
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int32:
		elem.SetInt(int64(int32(x)))
	case reflect.Uint32:
		elem.SetUint(uint64(x))
	case reflect.Float32:
		elem.SetFloat(float64(math.Float32frombits(x)))
	}
}

// Len returns the number of elements in the slice.
func (p word32Slice) Len() int {
	return p.v.Len()
}

// Index returns element i as its uint32 bit pattern.
func (p word32Slice) Index(i int) uint32 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
	return word32Slice{structPointer_field(p, f)}
}
|||
|
|||
// word64 is like word32 but for 64-bit values.
type word64 struct {
	v reflect.Value
}

// word64_Set sets p to point at a newly allocated word with bits set to x.
// Words are drawn from the Buffer's per-type pools; any element type other
// than int64, uint64, or float64 panics (there is no enum fallthrough here,
// unlike word32_Set).
func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	panic("unreachable")
}

// word64_IsNil reports whether p is nil.
func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

// word64_Get gets the bits pointed at by p, as a uint64.
func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64 returns a reference to a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}
|||
|
|||
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
	v reflect.Value
}

// word64Val_Set sets the field to x, reinterpreting the bits according to the
// field's type. The Buffer argument is unused; it keeps the signature parallel
// with word64_Set. Any type other than int64, uint64, or float64 panics.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	switch p.v.Type() {
	case int64Type:
		p.v.SetInt(int64(x))
		return
	case uint64Type:
		p.v.SetUint(x)
		return
	case float64Type:
		p.v.SetFloat(math.Float64frombits(x))
		return
	}
	panic("unreachable")
}

// word64Val_Get gets the bits in the field, as a uint64.
func word64Val_Get(p word64Val) uint64 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64Val returns a reference to an int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val{structPointer_field(p, f)}
}
|||
|
|||
type word64Slice struct { |
|||
v reflect.Value |
|||
} |
|||
|
|||
func (p word64Slice) Append(x uint64) { |
|||
n, m := p.v.Len(), p.v.Cap() |
|||
if n < m { |
|||
p.v.SetLen(n + 1) |
|||
} else { |
|||
t := p.v.Type().Elem() |
|||
p.v.Set(reflect.Append(p.v, reflect.Zero(t))) |
|||
} |
|||
elem := p.v.Index(n) |
|||
switch elem.Kind() { |
|||
case reflect.Int64: |
|||
elem.SetInt(int64(int64(x))) |
|||
case reflect.Uint64: |
|||
elem.SetUint(uint64(x)) |
|||
case reflect.Float64: |
|||
elem.SetFloat(float64(math.Float64frombits(x))) |
|||
} |
|||
} |
|||
|
|||
func (p word64Slice) Len() int { |
|||
return p.v.Len() |
|||
} |
|||
|
|||
func (p word64Slice) Index(i int) uint64 { |
|||
elem := p.v.Index(i) |
|||
switch elem.Kind() { |
|||
case reflect.Int64: |
|||
return uint64(elem.Int()) |
|||
case reflect.Uint64: |
|||
return uint64(elem.Uint()) |
|||
case reflect.Float64: |
|||
return math.Float64bits(float64(elem.Float())) |
|||
} |
|||
panic("unreachable") |
|||
} |
|||
|
|||
// structPointer_Word64Slice returns a reference to a []int64, []uint64, or []float64 field in the struct.
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
	return word64Slice{structPointer_field(p, f)}
}
|||
@ -0,0 +1,270 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
// +build !appengine,!js
|
|||
|
|||
// This file contains the implementation of the proto field accesses using package unsafe.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"reflect" |
|||
"unsafe" |
|||
) |
|||
|
|||
// NOTE: These type_Foo functions would more idiomatically be methods,
|
|||
// but Go does not allow methods on pointer types, and we must preserve
|
|||
// some pointer type for the garbage collector. We use these
|
|||
// funcs with clunky names as our poor approximation to methods.
|
|||
//
|
|||
// An alternative would be
|
|||
// type structPointer struct { p unsafe.Pointer }
|
|||
// but that does not registerize as well.
|
|||
|
|||
// A structPointer is a pointer to a struct.
// In this implementation it is a raw unsafe.Pointer; all field accessors
// below compute field addresses as uintptr(p) + uintptr(f).
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != ^field(0)
}
|||
|
|||
// Each accessor below computes the field's address as base + offset and
// casts it to a typed pointer; no type checking happens at runtime.

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Extensions returns the address of the XXX_InternalExtensions field in the struct.
// (The original comment said "ExtMap"; that describes the next function.)
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

// Len returns the number of elements in the slice.
func (v *structPointerSlice) Len() int { return len(*v) }

// Index returns the i'th element.
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }

// Append adds p to the end of the slice.
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
|||
|
|||
// A word32 is the address of a "pointer to 32-bit value" field.
// All 32-bit kinds (*int32, *uint32, *float32, *enum) are accessed through a
// **uint32 reinterpretation of the field's address.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
// Words come from the Buffer's uint32 pool to amortize allocations during decoding.
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		// Pool exhausted; refill it.
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	*p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
	return *p
}

// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
|||
|
|||
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

// Append adds x to the end of the slice.
func (v *word32Slice) Append(x uint32) {
	*v = append(*v, x)
}

// Len returns the number of elements in the slice.
func (v *word32Slice) Len() int {
	return len(*v)
}

// Index returns the i'th element.
func (v *word32Slice) Index(i int) uint32 {
	return (*v)[i]
}
|||
|
|||
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
// The field's backing memory is reinterpreted as a []uint32.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

// word64_Set sets *p to point at a newly allocated word set to x,
// drawn from the Buffer's uint64 pool.
func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		// Pool exhausted; refill it.
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

// word64_IsNil reports whether *p is nil.
func word64_IsNil(p word64) bool {
	return *p == nil
}

// word64_Get gets the value pointed at by *p.
func word64_Get(p word64) uint64 {
	return **p
}

// structPointer_Word64 returns the address of a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

// word64Val_Set sets *p to x. The Buffer argument is unused here; it keeps
// the signature parallel with the reflect-based implementation.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	*p = x
}

// word64Val_Get gets the value pointed at by p.
func word64Val_Get(p word64Val) uint64 {
	return *p
}

// structPointer_Word64Val returns the address of an int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
|||
|
|||
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

// Append adds x to the end of the slice.
func (v *word64Slice) Append(x uint64) {
	*v = append(*v, x)
}

// Len returns the number of elements in the slice.
func (v *word64Slice) Len() int {
	return len(*v)
}

// Index returns the i'th element.
func (v *word64Slice) Index(i int) uint64 {
	return (*v)[i]
}
|||
|
|||
// structPointer_Word64Slice returns the address of a slice of 64-bit
// values ([]int64, []uint64, []float64) at byte offset f in the struct.
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
|||
@ -0,0 +1,872 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
/* |
|||
* Routines for encoding data into the wire format for protocol buffers. |
|||
*/ |
|||
|
|||
import ( |
|||
"fmt" |
|||
"log" |
|||
"os" |
|||
"reflect" |
|||
"sort" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
) |
|||
|
|||
// debug enables verbose per-field tracing in getPropertiesLocked.
const debug bool = false

// Constants that identify the encoding of a value on the wire.
const (
	WireVarint     = 0
	WireFixed64    = 1
	WireBytes      = 2
	WireStartGroup = 3
	WireEndGroup   = 4
	WireFixed32    = 5
)

const startSize = 10 // initial slice/string sizes

// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error

// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int

// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int

// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)

// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error

// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)

// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
|||
|
|||
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers, so those live in a dense slice; everything else overflows to a map.
type tagMap struct {
	fastTags []int
	slowTags map[int]int
}

// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024

// get returns the field index stored for tag t and whether an entry exists.
func (p *tagMap) get(t int) (int, bool) {
	// Out-of-range tags (non-positive or >= limit) always go to the map.
	if t <= 0 || t >= tagMapFastLimit {
		fi, ok := p.slowTags[t]
		return fi, ok
	}
	if t >= len(p.fastTags) {
		return 0, false
	}
	// A slot of -1 marks "no entry" in the fast slice.
	fi := p.fastTags[t]
	return fi, fi >= 0
}

// put records fi as the field index for tag t.
func (p *tagMap) put(t int, fi int) {
	if t <= 0 || t >= tagMapFastLimit {
		if p.slowTags == nil {
			p.slowTags = make(map[int]int)
		}
		p.slowTags[t] = fi
		return
	}
	// Grow the fast slice as needed, padding new slots with the -1 sentinel.
	for len(p.fastTags) <= t {
		p.fastTags = append(p.fastTags, -1)
	}
	p.fastTags[t] = fi
}
|||
|
|||
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
	Prop             []*Properties  // properties for each field
	reqCount         int            // required count
	decoderTags      tagMap         // map from proto tag to struct field number
	decoderOrigNames map[string]int // map from original name to struct field number
	order            []int          // list of struct field numbers in tag order
	unrecField       field          // field id of the XXX_unrecognized []byte field
	extendable       bool           // is this an extendable proto

	// oneof support: populated from the generated XXX_OneofFuncs method,
	// if the message declares oneof fields.
	oneofMarshaler   oneofMarshaler
	oneofUnmarshaler oneofUnmarshaler
	oneofSizer       oneofSizer
	stype            reflect.Type

	// OneofTypes contains information about the oneof fields in this message.
	// It is keyed by the original name of a field.
	OneofTypes map[string]*OneofProperties
}
|||
|
|||
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
	Type  reflect.Type // pointer to generated struct type for this oneof field
	Field int          // struct field number of the containing oneof in the message
	Prop  *Properties
}
|||
|
|||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
// Sorting permutes sp.order only; sp.Prop keeps declaration order.

func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|||
|
|||
// Properties represents the protocol-specific behavior of a single struct field.
// It is populated from the field's `protobuf:"..."` struct tag by Parse and
// wired up with encoder/decoder/sizer functions by setEncAndDec.
type Properties struct {
	Name     string // name of the field, for error messages
	OrigName string // original name before protocol compiler (always set)
	JSONName string // name to use for JSON; determined by protoc
	Wire     string
	WireType int
	Tag      int
	Required bool
	Optional bool
	Repeated bool
	Packed   bool   // relevant for repeated primitives only
	Enum     string // set for enum types only
	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
	oneof    bool   // whether this is a oneof field

	Default    string // default value
	HasDefault bool   // whether an explicit default was provided
	def_uint64 uint64

	enc           encoder
	valEnc        valueEncoder // set for bool and numeric types only
	field         field
	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
	tagbuf        [8]byte
	stype         reflect.Type      // set for struct types only
	sprop         *StructProperties // set for struct types only
	isMarshaler   bool
	isUnmarshaler bool

	mtype    reflect.Type // set for map types only
	mkeyprop *Properties  // set for map types only
	mvalprop *Properties  // set for map types only

	size    sizer
	valSize valueSizer // set for bool and numeric types only

	dec    decoder
	valDec valueDecoder // set for bool and numeric types only

	// If this is a packable field, this will be the decoder for the packed version of the field.
	packedDec decoder
}
|||
|
|||
// String formats the properties in the protobuf struct field tag style.
|
|||
func (p *Properties) String() string { |
|||
s := p.Wire |
|||
s = "," |
|||
s += strconv.Itoa(p.Tag) |
|||
if p.Required { |
|||
s += ",req" |
|||
} |
|||
if p.Optional { |
|||
s += ",opt" |
|||
} |
|||
if p.Repeated { |
|||
s += ",rep" |
|||
} |
|||
if p.Packed { |
|||
s += ",packed" |
|||
} |
|||
s += ",name=" + p.OrigName |
|||
if p.JSONName != p.OrigName { |
|||
s += ",json=" + p.JSONName |
|||
} |
|||
if p.proto3 { |
|||
s += ",proto3" |
|||
} |
|||
if p.oneof { |
|||
s += ",oneof" |
|||
} |
|||
if len(p.Enum) > 0 { |
|||
s += ",enum=" + p.Enum |
|||
} |
|||
if p.HasDefault { |
|||
s += ",def=" + p.Default |
|||
} |
|||
return s |
|||
} |
|||
|
|||
// Parse populates p by parsing a string in the protobuf struct field tag style.
// Malformed tags are reported on stderr (or silently ignored for a bad tag
// number) and leave p partially populated; Parse never returns an error.
func (p *Properties) Parse(s string) {
	// "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(s, ",") // breaks def=, but handled below.
	if len(fields) < 2 {
		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
		return
	}

	// Field 0 is the wire encoding; it selects the matching value
	// encoder/decoder/sizer trio for numeric types.
	p.Wire = fields[0]
	switch p.Wire {
	case "varint":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeVarint
		p.valDec = (*Buffer).DecodeVarint
		p.valSize = sizeVarint
	case "fixed32":
		p.WireType = WireFixed32
		p.valEnc = (*Buffer).EncodeFixed32
		p.valDec = (*Buffer).DecodeFixed32
		p.valSize = sizeFixed32
	case "fixed64":
		p.WireType = WireFixed64
		p.valEnc = (*Buffer).EncodeFixed64
		p.valDec = (*Buffer).DecodeFixed64
		p.valSize = sizeFixed64
	case "zigzag32":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeZigzag32
		p.valDec = (*Buffer).DecodeZigzag32
		p.valSize = sizeZigzag32
	case "zigzag64":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeZigzag64
		p.valDec = (*Buffer).DecodeZigzag64
		p.valSize = sizeZigzag64
	case "bytes", "group":
		p.WireType = WireBytes
		// no numeric converter for non-numeric types
	default:
		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
		return
	}

	// Field 1 is the numeric protobuf tag.
	var err error
	p.Tag, err = strconv.Atoi(fields[1])
	if err != nil {
		return
	}

	// Remaining fields are flags and key=value options.
	for i := 2; i < len(fields); i++ {
		f := fields[i]
		switch {
		case f == "req":
			p.Required = true
		case f == "opt":
			p.Optional = true
		case f == "rep":
			p.Repeated = true
		case f == "packed":
			p.Packed = true
		case strings.HasPrefix(f, "name="):
			p.OrigName = f[5:]
		case strings.HasPrefix(f, "json="):
			p.JSONName = f[5:]
		case strings.HasPrefix(f, "enum="):
			p.Enum = f[5:]
		case f == "proto3":
			p.proto3 = true
		case f == "oneof":
			p.oneof = true
		case strings.HasPrefix(f, "def="):
			p.HasDefault = true
			p.Default = f[4:] // rest of string
			if i+1 < len(fields) {
				// Commas aren't escaped, and def is always last.
				// NOTE(review): this break exits the switch, not the loop,
				// so the loop revisits the already-joined trailing fields;
				// they match no case, which appears to be harmless.
				p.Default += "," + strings.Join(fields[i+1:], ",")
				break
			}
		}
	}
}
|||
|
|||
// logNoSliceEnc reports on stderr that no slice encoder exists for a
// []t2 field inside t1.
// BUG FIX: the format previously used %T, but t1 and t2 are reflect.Type
// values, so %T always printed the dynamic type "*reflect.rtype" instead
// of the field's actual Go type; %v prints the type name itself.
func logNoSliceEnc(t1, t2 reflect.Type) {
	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %v = []%v\n", t1, t2)
}
|||
|
|||
// protoMessageType is the reflect.Type of the Message interface.
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|||
|
|||
// Initialize the fields for encoding and decoding.
// setEncAndDec selects the enc/dec/size function trio for the field based
// on its Go kind (proto3 scalar, pointer scalar, struct, slice, or map),
// then precomputes the varint-encoded tag bytes in p.tagcode. For struct
// fields it also resolves the nested StructProperties, honoring the
// lockGetProp flag to avoid re-entering the properties mutex.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
	p.enc = nil
	p.dec = nil
	p.size = nil

	switch t1 := typ; t1.Kind() {
	default:
		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)

	// proto3 scalar types

	case reflect.Bool:
		p.enc = (*Buffer).enc_proto3_bool
		p.dec = (*Buffer).dec_proto3_bool
		p.size = size_proto3_bool
	case reflect.Int32:
		p.enc = (*Buffer).enc_proto3_int32
		p.dec = (*Buffer).dec_proto3_int32
		p.size = size_proto3_int32
	case reflect.Uint32:
		p.enc = (*Buffer).enc_proto3_uint32
		p.dec = (*Buffer).dec_proto3_int32 // can reuse
		p.size = size_proto3_uint32
	case reflect.Int64, reflect.Uint64:
		p.enc = (*Buffer).enc_proto3_int64
		p.dec = (*Buffer).dec_proto3_int64
		p.size = size_proto3_int64
	case reflect.Float32:
		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
		p.dec = (*Buffer).dec_proto3_int32
		p.size = size_proto3_uint32
	case reflect.Float64:
		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
		p.dec = (*Buffer).dec_proto3_int64
		p.size = size_proto3_int64
	case reflect.String:
		p.enc = (*Buffer).enc_proto3_string
		p.dec = (*Buffer).dec_proto3_string
		p.size = size_proto3_string

	// proto2 optional/required scalars are pointers to the value.
	case reflect.Ptr:
		switch t2 := t1.Elem(); t2.Kind() {
		default:
			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
			break
		case reflect.Bool:
			p.enc = (*Buffer).enc_bool
			p.dec = (*Buffer).dec_bool
			p.size = size_bool
		case reflect.Int32:
			p.enc = (*Buffer).enc_int32
			p.dec = (*Buffer).dec_int32
			p.size = size_int32
		case reflect.Uint32:
			p.enc = (*Buffer).enc_uint32
			p.dec = (*Buffer).dec_int32 // can reuse
			p.size = size_uint32
		case reflect.Int64, reflect.Uint64:
			p.enc = (*Buffer).enc_int64
			p.dec = (*Buffer).dec_int64
			p.size = size_int64
		case reflect.Float32:
			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
			p.dec = (*Buffer).dec_int32
			p.size = size_uint32
		case reflect.Float64:
			p.enc = (*Buffer).enc_int64 // can just treat them as bits
			p.dec = (*Buffer).dec_int64
			p.size = size_int64
		case reflect.String:
			p.enc = (*Buffer).enc_string
			p.dec = (*Buffer).dec_string
			p.size = size_string
		case reflect.Struct:
			// Embedded message (*T): wire "bytes" means length-delimited
			// message encoding, otherwise the legacy group encoding.
			p.stype = t1.Elem()
			p.isMarshaler = isMarshaler(t1)
			p.isUnmarshaler = isUnmarshaler(t1)
			if p.Wire == "bytes" {
				p.enc = (*Buffer).enc_struct_message
				p.dec = (*Buffer).dec_struct_message
				p.size = size_struct_message
			} else {
				p.enc = (*Buffer).enc_struct_group
				p.dec = (*Buffer).dec_struct_group
				p.size = size_struct_group
			}
		}

	// repeated fields
	case reflect.Slice:
		switch t2 := t1.Elem(); t2.Kind() {
		default:
			logNoSliceEnc(t1, t2)
			break
		case reflect.Bool:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_bool
				p.size = size_slice_packed_bool
			} else {
				p.enc = (*Buffer).enc_slice_bool
				p.size = size_slice_bool
			}
			p.dec = (*Buffer).dec_slice_bool
			p.packedDec = (*Buffer).dec_slice_packed_bool
		case reflect.Int32:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_int32
				p.size = size_slice_packed_int32
			} else {
				p.enc = (*Buffer).enc_slice_int32
				p.size = size_slice_int32
			}
			p.dec = (*Buffer).dec_slice_int32
			p.packedDec = (*Buffer).dec_slice_packed_int32
		case reflect.Uint32:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_uint32
				p.size = size_slice_packed_uint32
			} else {
				p.enc = (*Buffer).enc_slice_uint32
				p.size = size_slice_uint32
			}
			p.dec = (*Buffer).dec_slice_int32
			p.packedDec = (*Buffer).dec_slice_packed_int32
		case reflect.Int64, reflect.Uint64:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_int64
				p.size = size_slice_packed_int64
			} else {
				p.enc = (*Buffer).enc_slice_int64
				p.size = size_slice_int64
			}
			p.dec = (*Buffer).dec_slice_int64
			p.packedDec = (*Buffer).dec_slice_packed_int64
		case reflect.Uint8:
			// []byte field.
			p.dec = (*Buffer).dec_slice_byte
			if p.proto3 {
				p.enc = (*Buffer).enc_proto3_slice_byte
				p.size = size_proto3_slice_byte
			} else {
				p.enc = (*Buffer).enc_slice_byte
				p.size = size_slice_byte
			}
		case reflect.Float32, reflect.Float64:
			switch t2.Bits() {
			case 32:
				// can just treat them as bits
				if p.Packed {
					p.enc = (*Buffer).enc_slice_packed_uint32
					p.size = size_slice_packed_uint32
				} else {
					p.enc = (*Buffer).enc_slice_uint32
					p.size = size_slice_uint32
				}
				p.dec = (*Buffer).dec_slice_int32
				p.packedDec = (*Buffer).dec_slice_packed_int32
			case 64:
				// can just treat them as bits
				if p.Packed {
					p.enc = (*Buffer).enc_slice_packed_int64
					p.size = size_slice_packed_int64
				} else {
					p.enc = (*Buffer).enc_slice_int64
					p.size = size_slice_int64
				}
				p.dec = (*Buffer).dec_slice_int64
				p.packedDec = (*Buffer).dec_slice_packed_int64
			default:
				logNoSliceEnc(t1, t2)
				break
			}
		case reflect.String:
			p.enc = (*Buffer).enc_slice_string
			p.dec = (*Buffer).dec_slice_string
			p.size = size_slice_string
		case reflect.Ptr:
			// repeated message ([]*T)
			switch t3 := t2.Elem(); t3.Kind() {
			default:
				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
				break
			case reflect.Struct:
				p.stype = t2.Elem()
				p.isMarshaler = isMarshaler(t2)
				p.isUnmarshaler = isUnmarshaler(t2)
				if p.Wire == "bytes" {
					p.enc = (*Buffer).enc_slice_struct_message
					p.dec = (*Buffer).dec_slice_struct_message
					p.size = size_slice_struct_message
				} else {
					p.enc = (*Buffer).enc_slice_struct_group
					p.dec = (*Buffer).dec_slice_struct_group
					p.size = size_slice_struct_group
				}
			}
		case reflect.Slice:
			// repeated bytes ([][]byte)
			switch t2.Elem().Kind() {
			default:
				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
				break
			case reflect.Uint8:
				p.enc = (*Buffer).enc_slice_slice_byte
				p.dec = (*Buffer).dec_slice_slice_byte
				p.size = size_slice_slice_byte
			}
		}

	case reflect.Map:
		p.enc = (*Buffer).enc_new_map
		p.dec = (*Buffer).dec_new_map
		p.size = size_new_map

		// Map key/value get their own sub-Properties, parsed from the
		// companion protobuf_key/protobuf_val struct tags.
		p.mtype = t1
		p.mkeyprop = &Properties{}
		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
		p.mvalprop = &Properties{}
		vtype := p.mtype.Elem()
		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
			// The value type is not a message (*T) or bytes ([]byte),
			// so we need encoders for the pointer to this type.
			vtype = reflect.PtrTo(vtype)
		}
		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
	}

	// precalculate tag code
	// The key on the wire is (Tag<<3)|wireType, varint-encoded; packed
	// repeated fields are always length-delimited (WireBytes).
	wire := p.WireType
	if p.Packed {
		wire = WireBytes
	}
	x := uint32(p.Tag)<<3 | uint32(wire)
	i := 0
	for i = 0; x > 127; i++ {
		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	p.tagbuf[i] = uint8(x)
	p.tagcode = p.tagbuf[0 : i+1]

	if p.stype != nil {
		if lockGetProp {
			p.sprop = GetProperties(p.stype)
		} else {
			p.sprop = getPropertiesLocked(p.stype)
		}
	}
}
|||
|
|||
// Interface types used to detect messages with custom wire formats.
var (
	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)

// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
	// We're checking for (likely) pointer-receiver methods
	// so if t is not a pointer, something is very wrong.
	// The calls above only invoke isMarshaler on pointer types.
	if t.Kind() != reflect.Ptr {
		panic("proto: misuse of isMarshaler")
	}
	return t.Implements(marshalerType)
}

// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
	// We're checking for (likely) pointer-receiver methods
	// so if t is not a pointer, something is very wrong.
	// The calls above only invoke isUnmarshaler on pointer types.
	if t.Kind() != reflect.Ptr {
		panic("proto: misuse of isUnmarshaler")
	}
	return t.Implements(unmarshalerType)
}
|||
|
|||
// Init populates the properties from a protocol buffer struct tag.
// It is the exported entry point; it always acquires the properties lock
// when resolving nested struct types.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
	p.init(typ, name, tag, f, true)
}

// init is the shared implementation behind Init. lockGetProp selects
// GetProperties (locking) vs getPropertiesLocked (caller already holds
// the lock) when resolving nested struct properties.
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
	// "bytes,49,opt,def=hello!"
	p.Name = name
	p.OrigName = name
	if f != nil {
		p.field = toField(f)
	}
	if tag == "" {
		// No protobuf tag: leave enc/dec unset (field is not encoded).
		return
	}
	p.Parse(tag)
	p.setEncAndDec(typ, f, lockGetProp)
}
|||
|
|||
// propertiesMap caches computed StructProperties per message type,
// guarded by propertiesMu.
var (
	propertiesMu  sync.RWMutex
	propertiesMap = make(map[reflect.Type]*StructProperties)
)

// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic("proto: type must have kind struct")
	}

	// Most calls to GetProperties in a long-running program will be
	// retrieving details for types we have seen before.
	propertiesMu.RLock()
	sprop, ok := propertiesMap[t]
	propertiesMu.RUnlock()
	if ok {
		if collectStats {
			stats.Chit++
		}
		return sprop
	}

	// Cache miss: take the write lock and build (getPropertiesLocked
	// re-checks the map, so a racing builder is harmless).
	propertiesMu.Lock()
	sprop = getPropertiesLocked(t)
	propertiesMu.Unlock()
	return sprop
}
|||
|
|||
// getPropertiesLocked requires that propertiesMu is held.
// It builds (and caches) the StructProperties for t: per-field Properties,
// special handling for the XXX_* bookkeeping fields, tag-ordered field
// order, oneof metadata, and the required-field count.
func getPropertiesLocked(t reflect.Type) *StructProperties {
	if prop, ok := propertiesMap[t]; ok {
		if collectStats {
			stats.Chit++
		}
		return prop
	}
	if collectStats {
		stats.Cmiss++
	}

	prop := new(StructProperties)
	// in case of recursive protos, fill this in now.
	propertiesMap[t] = prop

	// build properties
	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
		reflect.PtrTo(t).Implements(extendableProtoV1Type)
	prop.unrecField = invalidField
	prop.Prop = make([]*Properties, t.NumField())
	prop.order = make([]int, t.NumField())

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		p := new(Properties)
		name := f.Name
		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)

		if f.Name == "XXX_InternalExtensions" { // special case
			p.enc = (*Buffer).enc_exts
			p.dec = nil // not needed
			p.size = size_exts
		} else if f.Name == "XXX_extensions" { // special case
			p.enc = (*Buffer).enc_map
			p.dec = nil // not needed
			p.size = size_map
		} else if f.Name == "XXX_unrecognized" { // special case
			prop.unrecField = toField(&f)
		}
		oneof := f.Tag.Get("protobuf_oneof") // special case
		if oneof != "" {
			// Oneof fields don't use the traditional protobuf tag.
			p.OrigName = oneof
		}
		prop.Prop[i] = p
		prop.order[i] = i
		if debug {
			print(i, " ", f.Name, " ", t.String(), " ")
			if p.Tag > 0 {
				print(p.String())
			}
			print("\n")
		}
		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
		}
	}

	// Re-order prop.order.
	sort.Sort(prop)

	// Messages with oneofs implement this generated method; it supplies
	// the marshal/unmarshal/size hooks plus the oneof wrapper types.
	type oneofMessage interface {
		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
	}
	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
		var oots []interface{}
		prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
		prop.stype = t

		// Interpret oneof metadata.
		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, oot := range oots {
			oop := &OneofProperties{
				Type: reflect.ValueOf(oot).Type(), // *T
				Prop: new(Properties),
			}
			sft := oop.Type.Elem().Field(0)
			oop.Prop.Name = sft.Name
			oop.Prop.Parse(sft.Tag.Get("protobuf"))
			// There will be exactly one interface field that
			// this new value is assignable to.
			for i := 0; i < t.NumField(); i++ {
				f := t.Field(i)
				if f.Type.Kind() != reflect.Interface {
					continue
				}
				if !oop.Type.AssignableTo(f.Type) {
					continue
				}
				oop.Field = i
				break
			}
			prop.OneofTypes[oop.Prop.OrigName] = oop
		}
	}

	// build required counts
	// build tags
	reqCount := 0
	prop.decoderOrigNames = make(map[string]int)
	for i, p := range prop.Prop {
		if strings.HasPrefix(p.Name, "XXX_") {
			// Internal fields should not appear in tags/origNames maps.
			// They are handled specially when encoding and decoding.
			continue
		}
		if p.Required {
			reqCount++
		}
		prop.decoderTags.put(p.Tag, i)
		prop.decoderOrigNames[p.OrigName] = i
	}
	prop.reqCount = reqCount

	return prop
}
|||
|
|||
// Return the Properties object for the x[0]'th field of the structure.
// Only flat (single-level) field indices are supported; multi-level
// indices report an error on stderr and return nil.
func propByIndex(t reflect.Type, x []int) *Properties {
	if len(x) != 1 {
		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
		return nil
	}
	prop := GetProperties(t)
	return prop.Prop[x[0]]
}
|||
|
|||
// Get the address and type of a pointer to a struct from an interface.
// Returns ErrNil for a nil message; otherwise t is the pointer type of pb
// and b is the raw address of the underlying struct.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
	if pb == nil {
		err = ErrNil
		return
	}
	// get the reflect type of the pointer to the struct.
	t = reflect.TypeOf(pb)
	// get the address of the struct.
	value := reflect.ValueOf(pb)
	b = toStructPointer(value)
	return
}
|||
|
|||
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
// Registering the same name twice panics; unusedNameMap is accepted for
// signature compatibility but ignored.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
	if _, dup := enumValueMaps[typeName]; dup {
		panic("proto: duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or a nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
	m := enumValueMaps[enumType]
	return m
}
|||
|
|||
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
	protoTypes    = make(map[string]reflect.Type)
	revProtoTypes = make(map[reflect.Type]string)
)

// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
// Duplicate registrations are logged, not fatal.
func RegisterType(x Message, name string) {
	if _, ok := protoTypes[name]; ok {
		// TODO: Some day, make this a panic.
		log.Printf("proto: duplicate proto type registered: %s", name)
		return
	}
	t := reflect.TypeOf(x)
	protoTypes[name] = t
	revProtoTypes[t] = name
}

// MessageName returns the fully-qualified proto name for the given message type.
// A message may override the registry lookup by implementing XXX_MessageName.
func MessageName(x Message) string {
	type xname interface {
		XXX_MessageName() string
	}
	if m, ok := x.(xname); ok {
		return m.XXX_MessageName()
	}
	return revProtoTypes[reflect.TypeOf(x)]
}

// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
|||
|
|||
// A registry of all linked proto files.
var (
	protoFiles = make(map[string][]byte) // file name => fileDescriptor
)

// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
	protoFiles[filename] = fileDescriptor
}

// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
// It returns nil for files that were never registered.
func FileDescriptor(filename string) []byte {
	fd := protoFiles[filename]
	return fd
}
|||
@ -0,0 +1,219 @@ |
|||
// Code generated by protoc-gen-go.
|
|||
// source: proto3_proto/proto3.proto
|
|||
// DO NOT EDIT!
|
|||
|
|||
/* |
|||
Package proto3_proto is a generated protocol buffer package. |
|||
|
|||
It is generated from these files: |
|||
proto3_proto/proto3.proto |
|||
|
|||
It has these top-level messages: |
|||
Message |
|||
Nested |
|||
MessageWithMap |
|||
*/ |
|||
package proto3_proto |
|||
|
|||
import proto "github.com/golang/protobuf/proto" |
|||
import fmt "fmt" |
|||
import math "math" |
|||
import google_protobuf "github.com/golang/protobuf/ptypes/any" |
|||
import testdata "github.com/golang/protobuf/proto/testdata" |
|||
|
|||
// Reference imports to suppress errors if they are not otherwise used.
|
|||
var _ = proto.Marshal |
|||
var _ = fmt.Errorf |
|||
var _ = math.Inf |
|||
|
|||
// This is a compile-time assertion to ensure that this generated file
|
|||
// is compatible with the proto package it is being compiled against.
|
|||
const _ = proto.ProtoPackageIsVersion1 |
|||
|
|||
// Message_Humour is the Go representation of the proto3 enum Message.Humour.
// NOTE(review): this is protoc-gen-go generated code; regenerate rather
// than editing by hand.
type Message_Humour int32

const (
	Message_UNKNOWN     Message_Humour = 0
	Message_PUNS        Message_Humour = 1
	Message_SLAPSTICK   Message_Humour = 2
	Message_BILL_BAILEY Message_Humour = 3
)

// Message_Humour_name maps enum values to their proto names.
var Message_Humour_name = map[int32]string{
	0: "UNKNOWN",
	1: "PUNS",
	2: "SLAPSTICK",
	3: "BILL_BAILEY",
}

// Message_Humour_value is the inverse mapping, from proto names to values.
var Message_Humour_value = map[string]int32{
	"UNKNOWN":     0,
	"PUNS":        1,
	"SLAPSTICK":   2,
	"BILL_BAILEY": 3,
}
|||
|
|||
// String returns the proto name of the enum value (generated code).
func (x Message_Humour) String() string {
	return proto.EnumName(Message_Humour_name, int32(x))
}

// EnumDescriptor returns the compressed file descriptor bytes and the path
// of this enum within that descriptor (generated code).
func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
|||
|
|||
type Message struct { |
|||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` |
|||
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` |
|||
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` |
|||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` |
|||
ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` |
|||
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` |
|||
Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` |
|||
Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` |
|||
ShortKey []int32 `protobuf:"varint,19,rep,name=short_key,json=shortKey" json:"short_key,omitempty"` |
|||
Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` |
|||
RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` |
|||
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` |
|||
Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` |
|||
Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` |
|||
Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` |
|||
ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` |
|||
Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` |
|||
Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` |
|||
} |
|||
|
|||
func (m *Message) Reset() { *m = Message{} } |
|||
func (m *Message) String() string { return proto.CompactTextString(m) } |
|||
func (*Message) ProtoMessage() {} |
|||
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } |
|||
|
|||
func (m *Message) GetNested() *Nested { |
|||
if m != nil { |
|||
return m.Nested |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetTerrain() map[string]*Nested { |
|||
if m != nil { |
|||
return m.Terrain |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetProto2Field() *testdata.SubDefaults { |
|||
if m != nil { |
|||
return m.Proto2Field |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { |
|||
if m != nil { |
|||
return m.Proto2Value |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetAnything() *google_protobuf.Any { |
|||
if m != nil { |
|||
return m.Anything |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetManyThings() []*google_protobuf.Any { |
|||
if m != nil { |
|||
return m.ManyThings |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetSubmessage() *Message { |
|||
if m != nil { |
|||
return m.Submessage |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *Message) GetChildren() []*Message { |
|||
if m != nil { |
|||
return m.Children |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
type Nested struct { |
|||
Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` |
|||
Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` |
|||
} |
|||
|
|||
func (m *Nested) Reset() { *m = Nested{} } |
|||
func (m *Nested) String() string { return proto.CompactTextString(m) } |
|||
func (*Nested) ProtoMessage() {} |
|||
func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } |
|||
|
|||
type MessageWithMap struct { |
|||
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` |
|||
} |
|||
|
|||
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } |
|||
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } |
|||
func (*MessageWithMap) ProtoMessage() {} |
|||
func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } |
|||
|
|||
func (m *MessageWithMap) GetByteMapping() map[bool][]byte { |
|||
if m != nil { |
|||
return m.ByteMapping |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func init() { |
|||
proto.RegisterType((*Message)(nil), "proto3_proto.Message") |
|||
proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") |
|||
proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") |
|||
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) |
|||
} |
|||
|
|||
var fileDescriptor0 = []byte{ |
|||
// 669 bytes of a gzipped FileDescriptorProto
|
|||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xd3, 0x3a, |
|||
0x18, 0xbd, 0x7d, 0x4f, 0x9f, 0xa4, 0x5b, 0xae, 0xb7, 0x2b, 0x79, 0xbd, 0x7c, 0x18, 0x45, 0x42, |
|||
0x13, 0x2f, 0x19, 0x14, 0x21, 0x4d, 0x08, 0x81, 0xb6, 0xb1, 0x89, 0x6a, 0x5d, 0xa9, 0xdc, 0x8d, |
|||
0x89, 0x4f, 0x51, 0xd2, 0xba, 0x6d, 0x44, 0xe3, 0x54, 0x89, 0x83, 0x94, 0xbf, 0xc3, 0x1f, 0x05, |
|||
0xbf, 0xa4, 0x5d, 0x36, 0x75, 0xf0, 0x29, 0xf6, 0x79, 0xce, 0xf1, 0xf3, 0xe4, 0xf8, 0x18, 0xf6, |
|||
0x96, 0x71, 0xc4, 0xa3, 0x37, 0xae, 0xfa, 0x1c, 0xea, 0x8d, 0xa3, 0x3e, 0xc8, 0x2a, 0x96, 0xda, |
|||
0x7b, 0xb3, 0x28, 0x9a, 0x2d, 0xa8, 0xa6, 0xf8, 0xe9, 0xf4, 0xd0, 0x63, 0x99, 0x26, 0xb6, 0x77, |
|||
0x38, 0x4d, 0xf8, 0xc4, 0xe3, 0xde, 0xa1, 0x5c, 0x68, 0xb0, 0xf3, 0xab, 0x01, 0x8d, 0x4b, 0x9a, |
|||
0x24, 0xde, 0x8c, 0x22, 0x04, 0x55, 0xe6, 0x85, 0x14, 0x97, 0xf6, 0x4b, 0x07, 0x4d, 0xa2, 0xd6, |
|||
0xe8, 0x08, 0x8c, 0x79, 0xb0, 0xf0, 0xe2, 0x80, 0x67, 0xb8, 0x2c, 0xf0, 0xad, 0xee, 0x23, 0xa7, |
|||
0xd8, 0xd0, 0xc9, 0xc5, 0xce, 0xe7, 0x34, 0x8c, 0xd2, 0x98, 0xac, 0xd9, 0x68, 0x1f, 0xac, 0x39, |
|||
0x0d, 0x66, 0x73, 0xee, 0x06, 0xcc, 0x1d, 0x87, 0xb8, 0x22, 0xd4, 0x2d, 0x02, 0x1a, 0xeb, 0xb1, |
|||
0xd3, 0x50, 0xf6, 0x93, 0xe3, 0xe0, 0xaa, 0xa8, 0x58, 0x44, 0xad, 0xd1, 0x63, 0xb0, 0x62, 0x9a, |
|||
0xa4, 0x0b, 0xee, 0x8e, 0xa3, 0x94, 0x71, 0xdc, 0x10, 0xb5, 0x0a, 0x31, 0x35, 0x76, 0x2a, 0x21, |
|||
0xf4, 0x04, 0x5a, 0x3c, 0x4e, 0xa9, 0x9b, 0x8c, 0x23, 0x9e, 0x84, 0x1e, 0xc3, 0x86, 0xe0, 0x18, |
|||
0xc4, 0x92, 0xe0, 0x28, 0xc7, 0xd0, 0x2e, 0xd4, 0x44, 0x3d, 0xa6, 0xb8, 0x29, 0x8a, 0x65, 0xa2, |
|||
0x37, 0xc8, 0x86, 0xca, 0x77, 0x9a, 0xe1, 0xda, 0x7e, 0xe5, 0xa0, 0x4a, 0xe4, 0x12, 0xfd, 0x0f, |
|||
0xcd, 0x64, 0x1e, 0xc5, 0xdc, 0x95, 0xf8, 0x8e, 0xc0, 0x6b, 0xc4, 0x50, 0xc0, 0x85, 0x28, 0xbe, |
|||
0x80, 0x3a, 0x13, 0x56, 0xd1, 0x09, 0xae, 0x8b, 0x53, 0xcc, 0xee, 0xee, 0xdd, 0x5f, 0x1f, 0xa8, |
|||
0x1a, 0xc9, 0x39, 0xe8, 0x2d, 0x34, 0x62, 0x77, 0x9a, 0x32, 0x96, 0x61, 0x5b, 0x1c, 0xf4, 0x37, |
|||
0xa7, 0xea, 0xf1, 0xb9, 0xe4, 0xa2, 0xf7, 0xd0, 0xe0, 0x34, 0x8e, 0xbd, 0x80, 0x61, 0x10, 0x32, |
|||
0xb3, 0xdb, 0xd9, 0x2c, 0xbb, 0xd2, 0xa4, 0x33, 0xc6, 0xe3, 0x8c, 0xac, 0x24, 0xe2, 0x7e, 0xf4, |
|||
0xfd, 0x77, 0xdd, 0x69, 0x40, 0x17, 0x13, 0x6c, 0xaa, 0x41, 0xff, 0x73, 0x56, 0x77, 0xed, 0x8c, |
|||
0x52, 0xff, 0x13, 0x9d, 0x7a, 0xc2, 0xbd, 0x84, 0x98, 0x9a, 0x7a, 0x2e, 0x99, 0xa8, 0xb7, 0x56, |
|||
0xfe, 0xf0, 0x16, 0x29, 0xc5, 0x2d, 0xd5, 0xfc, 0xe9, 0xe6, 0xe6, 0x43, 0xc5, 0xfc, 0x2a, 0x89, |
|||
0x7a, 0x80, 0xfc, 0x28, 0x85, 0xa0, 0x57, 0x60, 0x88, 0x98, 0xf1, 0x79, 0xc0, 0x66, 0x78, 0x2b, |
|||
0x77, 0x4a, 0xe7, 0xd0, 0x59, 0xe5, 0xd0, 0x39, 0x66, 0x19, 0x59, 0xb3, 0x84, 0x57, 0xa6, 0xb8, |
|||
0xa5, 0xcc, 0x55, 0xbb, 0x04, 0x6f, 0xab, 0xde, 0x9b, 0x45, 0x20, 0x89, 0x57, 0x8a, 0x27, 0x64, |
|||
0x90, 0xa4, 0x7e, 0xa8, 0x87, 0xc2, 0xff, 0xe6, 0xff, 0xba, 0x69, 0x62, 0x52, 0x20, 0xa2, 0xd7, |
|||
0x60, 0x8c, 0x45, 0x2e, 0x27, 0x31, 0x65, 0x18, 0xa9, 0x56, 0x0f, 0x88, 0xd6, 0xb4, 0xf6, 0x10, |
|||
0xac, 0xa2, 0xe1, 0xab, 0xe4, 0xe8, 0xa7, 0xa1, 0x92, 0xf3, 0x0c, 0x6a, 0xda, 0xb8, 0xf2, 0x1f, |
|||
0xb2, 0xa1, 0x29, 0xef, 0xca, 0x47, 0xa5, 0xf6, 0x35, 0xd8, 0xf7, 0x5d, 0xdc, 0x70, 0xea, 0xf3, |
|||
0xbb, 0xa7, 0x3e, 0x70, 0x91, 0xb7, 0xc7, 0x76, 0x3e, 0x42, 0x5d, 0x07, 0x0a, 0x99, 0xd0, 0xb8, |
|||
0x1e, 0x5c, 0x0c, 0xbe, 0xdc, 0x0c, 0xec, 0x7f, 0x90, 0x01, 0xd5, 0xe1, 0xf5, 0x60, 0x64, 0x97, |
|||
0x50, 0x0b, 0x9a, 0xa3, 0xfe, 0xf1, 0x70, 0x74, 0xd5, 0x3b, 0xbd, 0xb0, 0xcb, 0x68, 0x1b, 0xcc, |
|||
0x93, 0x5e, 0xbf, 0xef, 0x9e, 0x1c, 0xf7, 0xfa, 0x67, 0xdf, 0xec, 0x4a, 0xa7, 0x0b, 0x75, 0x3d, |
|||
0xac, 0x7c, 0x33, 0xbe, 0x8a, 0xaf, 0x9e, 0x47, 0x6f, 0xe4, 0x2b, 0x1d, 0xa7, 0x5c, 0x0f, 0x64, |
|||
0x10, 0xb5, 0xee, 0xfc, 0x2c, 0xc1, 0x56, 0xee, 0xd9, 0x4d, 0xc0, 0xe7, 0x97, 0xde, 0x12, 0x09, |
|||
0xc3, 0xfc, 0x8c, 0x53, 0x37, 0xf4, 0x96, 0x4b, 0x99, 0x83, 0x92, 0xf2, 0xf9, 0xe5, 0x46, 0x9f, |
|||
0x73, 0x8d, 0x73, 0x22, 0x04, 0x97, 0x9a, 0x9f, 0xa7, 0xca, 0xbf, 0x45, 0xda, 0x1f, 0xc0, 0xbe, |
|||
0x4f, 0x28, 0x1a, 0x66, 0x68, 0xc3, 0x76, 0x8b, 0x86, 0x59, 0x05, 0x67, 0xfc, 0xba, 0x6e, 0xfd, |
|||
0x3b, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x40, 0x3c, 0xbe, 0x3c, 0x05, 0x00, 0x00, |
|||
} |
|||
@ -0,0 +1,78 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format |
|||
// |
|||
// Copyright 2014 The Go Authors. All rights reserved. |
|||
// https://github.com/golang/protobuf |
|||
// |
|||
// Redistribution and use in source and binary forms, with or without |
|||
// modification, are permitted provided that the following conditions are |
|||
// met: |
|||
// |
|||
// * Redistributions of source code must retain the above copyright |
|||
// notice, this list of conditions and the following disclaimer. |
|||
// * Redistributions in binary form must reproduce the above |
|||
// copyright notice, this list of conditions and the following disclaimer |
|||
// in the documentation and/or other materials provided with the |
|||
// distribution. |
|||
// * Neither the name of Google Inc. nor the names of its |
|||
// contributors may be used to endorse or promote products derived from |
|||
// this software without specific prior written permission. |
|||
// |
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|||
|
|||
syntax = "proto3"; |
|||
|
|||
import "google/protobuf/any.proto"; |
|||
import "testdata/test.proto"; |
|||
|
|||
package proto3_proto; |
|||
|
|||
message Message { |
|||
enum Humour { |
|||
UNKNOWN = 0; |
|||
PUNS = 1; |
|||
SLAPSTICK = 2; |
|||
BILL_BAILEY = 3; |
|||
} |
|||
|
|||
string name = 1; |
|||
Humour hilarity = 2; |
|||
uint32 height_in_cm = 3; |
|||
bytes data = 4; |
|||
int64 result_count = 7; |
|||
bool true_scotsman = 8; |
|||
float score = 9; |
|||
|
|||
repeated uint64 key = 5; |
|||
repeated int32 short_key = 19; |
|||
Nested nested = 6; |
|||
repeated Humour r_funny = 16; |
|||
|
|||
map<string, Nested> terrain = 10; |
|||
testdata.SubDefaults proto2_field = 11; |
|||
map<string, testdata.SubDefaults> proto2_value = 13; |
|||
|
|||
google.protobuf.Any anything = 14; |
|||
repeated google.protobuf.Any many_things = 15; |
|||
|
|||
Message submessage = 17; |
|||
repeated Message children = 18; |
|||
} |
|||
|
|||
message Nested { |
|||
string bunny = 1; |
|||
bool cute = 2; |
|||
} |
|||
|
|||
message MessageWithMap { |
|||
map<bool, bytes> byte_mapping = 1; |
|||
} |
|||
@ -0,0 +1,125 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2014 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
tpb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
func TestProto3ZeroValues(t *testing.T) { |
|||
tests := []struct { |
|||
desc string |
|||
m proto.Message |
|||
}{ |
|||
{"zero message", &pb.Message{}}, |
|||
{"empty bytes field", &pb.Message{Data: []byte{}}}, |
|||
} |
|||
for _, test := range tests { |
|||
b, err := proto.Marshal(test.m) |
|||
if err != nil { |
|||
t.Errorf("%s: proto.Marshal: %v", test.desc, err) |
|||
continue |
|||
} |
|||
if len(b) > 0 { |
|||
t.Errorf("%s: Encoding is non-empty: %q", test.desc, b) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestRoundTripProto3(t *testing.T) { |
|||
m := &pb.Message{ |
|||
Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
|
|||
Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
|
|||
HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
|
|||
Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
|
|||
ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
|
|||
TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
|
|||
Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
|
|||
|
|||
Key: []uint64{1, 0xdeadbeef}, |
|||
Nested: &pb.Nested{ |
|||
Bunny: "Monty", |
|||
}, |
|||
} |
|||
t.Logf(" m: %v", m) |
|||
|
|||
b, err := proto.Marshal(m) |
|||
if err != nil { |
|||
t.Fatalf("proto.Marshal: %v", err) |
|||
} |
|||
t.Logf(" b: %q", b) |
|||
|
|||
m2 := new(pb.Message) |
|||
if err := proto.Unmarshal(b, m2); err != nil { |
|||
t.Fatalf("proto.Unmarshal: %v", err) |
|||
} |
|||
t.Logf("m2: %v", m2) |
|||
|
|||
if !proto.Equal(m, m2) { |
|||
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2) |
|||
} |
|||
} |
|||
|
|||
func TestProto3SetDefaults(t *testing.T) { |
|||
in := &pb.Message{ |
|||
Terrain: map[string]*pb.Nested{ |
|||
"meadow": new(pb.Nested), |
|||
}, |
|||
Proto2Field: new(tpb.SubDefaults), |
|||
Proto2Value: map[string]*tpb.SubDefaults{ |
|||
"badlands": new(tpb.SubDefaults), |
|||
}, |
|||
} |
|||
|
|||
got := proto.Clone(in).(*pb.Message) |
|||
proto.SetDefaults(got) |
|||
|
|||
// There are no defaults in proto3. Everything should be the zero value, but
|
|||
// we need to remember to set defaults for nested proto2 messages.
|
|||
want := &pb.Message{ |
|||
Terrain: map[string]*pb.Nested{ |
|||
"meadow": new(pb.Nested), |
|||
}, |
|||
Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)}, |
|||
Proto2Value: map[string]*tpb.SubDefaults{ |
|||
"badlands": &tpb.SubDefaults{N: proto.Int64(7)}, |
|||
}, |
|||
} |
|||
|
|||
if !proto.Equal(got, want) { |
|||
t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want) |
|||
} |
|||
} |
|||
@ -0,0 +1,63 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
import ( |
|||
"testing" |
|||
) |
|||
|
|||
// This is a separate file and package from size_test.go because that one uses
|
|||
// generated messages and thus may not be in package proto without having a circular
|
|||
// dependency, whereas this file tests unexported details of size.go.
|
|||
|
|||
func TestVarintSize(t *testing.T) { |
|||
// Check the edge cases carefully.
|
|||
testCases := []struct { |
|||
n uint64 |
|||
size int |
|||
}{ |
|||
{0, 1}, |
|||
{1, 1}, |
|||
{127, 1}, |
|||
{128, 2}, |
|||
{16383, 2}, |
|||
{16384, 3}, |
|||
{1<<63 - 1, 9}, |
|||
{1 << 63, 10}, |
|||
} |
|||
for _, tc := range testCases { |
|||
size := sizeVarint(tc.n) |
|||
if size != tc.size { |
|||
t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,164 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"log" |
|||
"strings" |
|||
"testing" |
|||
|
|||
. "github.com/golang/protobuf/proto" |
|||
proto3pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
pb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} |
|||
|
|||
// messageWithExtension2 is in equal_test.go.
|
|||
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} |
|||
|
|||
func init() { |
|||
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { |
|||
log.Panicf("SetExtension: %v", err) |
|||
} |
|||
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { |
|||
log.Panicf("SetExtension: %v", err) |
|||
} |
|||
|
|||
// Force messageWithExtension3 to have the extension encoded.
|
|||
Marshal(messageWithExtension3) |
|||
|
|||
} |
|||
|
|||
var SizeTests = []struct { |
|||
desc string |
|||
pb Message |
|||
}{ |
|||
{"empty", &pb.OtherMessage{}}, |
|||
// Basic types.
|
|||
{"bool", &pb.Defaults{F_Bool: Bool(true)}}, |
|||
{"int32", &pb.Defaults{F_Int32: Int32(12)}}, |
|||
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, |
|||
{"small int64", &pb.Defaults{F_Int64: Int64(1)}}, |
|||
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, |
|||
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, |
|||
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, |
|||
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, |
|||
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, |
|||
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, |
|||
{"float", &pb.Defaults{F_Float: Float32(12.6)}}, |
|||
{"double", &pb.Defaults{F_Double: Float64(13.9)}}, |
|||
{"string", &pb.Defaults{F_String: String("niles")}}, |
|||
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, |
|||
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, |
|||
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, |
|||
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, |
|||
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, |
|||
// Repeated.
|
|||
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, |
|||
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, |
|||
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, |
|||
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, |
|||
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, |
|||
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ |
|||
// Need enough large numbers to verify that the header is counting the number of bytes
|
|||
// for the field, not the number of elements.
|
|||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, |
|||
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, |
|||
}}}, |
|||
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, |
|||
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, |
|||
// Nested.
|
|||
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, |
|||
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, |
|||
// Other things.
|
|||
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, |
|||
{"extension (unencoded)", messageWithExtension1}, |
|||
{"extension (encoded)", messageWithExtension3}, |
|||
// proto3 message
|
|||
{"proto3 empty", &proto3pb.Message{}}, |
|||
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, |
|||
{"proto3 int64", &proto3pb.Message{ResultCount: 1}}, |
|||
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, |
|||
{"proto3 float", &proto3pb.Message{Score: 12.6}}, |
|||
{"proto3 string", &proto3pb.Message{Name: "Snezana"}}, |
|||
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, |
|||
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, |
|||
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, |
|||
{"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, |
|||
|
|||
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, |
|||
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, |
|||
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, |
|||
{"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, |
|||
|
|||
{"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, |
|||
{"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, |
|||
{"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, |
|||
|
|||
{"oneof not set", &pb.Oneof{}}, |
|||
{"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}}, |
|||
{"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}}, |
|||
{"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}}, |
|||
{"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}}, |
|||
{"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}}, |
|||
{"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}}, |
|||
{"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}}, |
|||
{"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}}, |
|||
{"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}}, |
|||
{"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}}, |
|||
{"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}}, |
|||
{"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}}, |
|||
{"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}}, |
|||
{"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}}, |
|||
{"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}}, |
|||
{"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}}, |
|||
{"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}}, |
|||
{"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, |
|||
{"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, |
|||
{"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, |
|||
} |
|||
|
|||
func TestSize(t *testing.T) { |
|||
for _, tc := range SizeTests { |
|||
size := Size(tc.pb) |
|||
b, err := Marshal(tc.pb) |
|||
if err != nil { |
|||
t.Errorf("%v: Marshal failed: %v", tc.desc, err) |
|||
continue |
|||
} |
|||
if size != len(b) { |
|||
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b)) |
|||
t.Logf("%v: bytes: %#v", tc.desc, b) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,50 @@ |
|||
# Go support for Protocol Buffers - Google's data interchange format
|
|||
#
|
|||
# Copyright 2010 The Go Authors. All rights reserved.
|
|||
# https://github.com/golang/protobuf
|
|||
#
|
|||
# Redistribution and use in source and binary forms, with or without
|
|||
# modification, are permitted provided that the following conditions are
|
|||
# met:
|
|||
#
|
|||
# * Redistributions of source code must retain the above copyright
|
|||
# notice, this list of conditions and the following disclaimer.
|
|||
# * Redistributions in binary form must reproduce the above
|
|||
# copyright notice, this list of conditions and the following disclaimer
|
|||
# in the documentation and/or other materials provided with the
|
|||
# distribution.
|
|||
# * Neither the name of Google Inc. nor the names of its
|
|||
# contributors may be used to endorse or promote products derived from
|
|||
# this software without specific prior written permission.
|
|||
#
|
|||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
|
|||
include ../../Make.protobuf |
|||
|
|||
all: regenerate |
|||
|
|||
regenerate: |
|||
rm -f test.pb.go |
|||
make test.pb.go |
|||
|
|||
# The following rules are just aids to development. Not needed for typical testing.
|
|||
|
|||
diff: regenerate |
|||
git diff test.pb.go |
|||
|
|||
restore: |
|||
cp test.pb.go.golden test.pb.go |
|||
|
|||
preserve: |
|||
cp test.pb.go test.pb.go.golden |
|||
@ -0,0 +1,86 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
// Verify that the compiler output for test.proto is unchanged.
|
|||
|
|||
package testdata |
|||
|
|||
import ( |
|||
"crypto/sha1" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"os" |
|||
"os/exec" |
|||
"path/filepath" |
|||
"testing" |
|||
) |
|||
|
|||
// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
|
|||
func sum(t *testing.T, name string) string { |
|||
data, err := ioutil.ReadFile(name) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
t.Logf("sum(%q): length is %d", name, len(data)) |
|||
hash := sha1.New() |
|||
_, err = hash.Write(data) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
return fmt.Sprintf("% x", hash.Sum(nil)) |
|||
} |
|||
|
|||
func run(t *testing.T, name string, args ...string) { |
|||
cmd := exec.Command(name, args...) |
|||
cmd.Stdin = os.Stdin |
|||
cmd.Stdout = os.Stdout |
|||
cmd.Stderr = os.Stderr |
|||
err := cmd.Run() |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
} |
|||
|
|||
func TestGolden(t *testing.T) { |
|||
// Compute the original checksum.
|
|||
goldenSum := sum(t, "test.pb.go") |
|||
// Run the proto compiler.
|
|||
run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") |
|||
newFile := filepath.Join(os.TempDir(), "test.pb.go") |
|||
defer os.Remove(newFile) |
|||
// Compute the new checksum.
|
|||
newSum := sum(t, newFile) |
|||
// Verify
|
|||
if newSum != goldenSum { |
|||
run(t, "diff", "-u", "test.pb.go", newFile) |
|||
t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") |
|||
} |
|||
} |
|||
4061
vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,548 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format |
|||
// |
|||
// Copyright 2010 The Go Authors. All rights reserved. |
|||
// https://github.com/golang/protobuf |
|||
// |
|||
// Redistribution and use in source and binary forms, with or without |
|||
// modification, are permitted provided that the following conditions are |
|||
// met: |
|||
// |
|||
// * Redistributions of source code must retain the above copyright |
|||
// notice, this list of conditions and the following disclaimer. |
|||
// * Redistributions in binary form must reproduce the above |
|||
// copyright notice, this list of conditions and the following disclaimer |
|||
// in the documentation and/or other materials provided with the |
|||
// distribution. |
|||
// * Neither the name of Google Inc. nor the names of its |
|||
// contributors may be used to endorse or promote products derived from |
|||
// this software without specific prior written permission. |
|||
// |
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|||
|
|||
// A feature-rich test file for the protocol compiler and libraries. |
|||
|
|||
syntax = "proto2"; |
|||
|
|||
package testdata; |
|||
|
|||
enum FOO { FOO1 = 1; }; |
|||
|
|||
message GoEnum { |
|||
required FOO foo = 1; |
|||
} |
|||
|
|||
message GoTestField { |
|||
required string Label = 1; |
|||
required string Type = 2; |
|||
} |
|||
|
|||
message GoTest { |
|||
// An enum, for completeness. |
|||
enum KIND { |
|||
VOID = 0; |
|||
|
|||
// Basic types |
|||
BOOL = 1; |
|||
BYTES = 2; |
|||
FINGERPRINT = 3; |
|||
FLOAT = 4; |
|||
INT = 5; |
|||
STRING = 6; |
|||
TIME = 7; |
|||
|
|||
// Groupings |
|||
TUPLE = 8; |
|||
ARRAY = 9; |
|||
MAP = 10; |
|||
|
|||
// Table types |
|||
TABLE = 11; |
|||
|
|||
// Functions |
|||
FUNCTION = 12; // last tag |
|||
}; |
|||
|
|||
// Some typical parameters |
|||
required KIND Kind = 1; |
|||
optional string Table = 2; |
|||
optional int32 Param = 3; |
|||
|
|||
// Required, repeated and optional foreign fields. |
|||
required GoTestField RequiredField = 4; |
|||
repeated GoTestField RepeatedField = 5; |
|||
optional GoTestField OptionalField = 6; |
|||
|
|||
// Required fields of all basic types |
|||
required bool F_Bool_required = 10; |
|||
required int32 F_Int32_required = 11; |
|||
required int64 F_Int64_required = 12; |
|||
required fixed32 F_Fixed32_required = 13; |
|||
required fixed64 F_Fixed64_required = 14; |
|||
required uint32 F_Uint32_required = 15; |
|||
required uint64 F_Uint64_required = 16; |
|||
required float F_Float_required = 17; |
|||
required double F_Double_required = 18; |
|||
required string F_String_required = 19; |
|||
required bytes F_Bytes_required = 101; |
|||
required sint32 F_Sint32_required = 102; |
|||
required sint64 F_Sint64_required = 103; |
|||
|
|||
// Repeated fields of all basic types |
|||
repeated bool F_Bool_repeated = 20; |
|||
repeated int32 F_Int32_repeated = 21; |
|||
repeated int64 F_Int64_repeated = 22; |
|||
repeated fixed32 F_Fixed32_repeated = 23; |
|||
repeated fixed64 F_Fixed64_repeated = 24; |
|||
repeated uint32 F_Uint32_repeated = 25; |
|||
repeated uint64 F_Uint64_repeated = 26; |
|||
repeated float F_Float_repeated = 27; |
|||
repeated double F_Double_repeated = 28; |
|||
repeated string F_String_repeated = 29; |
|||
repeated bytes F_Bytes_repeated = 201; |
|||
repeated sint32 F_Sint32_repeated = 202; |
|||
repeated sint64 F_Sint64_repeated = 203; |
|||
|
|||
// Optional fields of all basic types |
|||
optional bool F_Bool_optional = 30; |
|||
optional int32 F_Int32_optional = 31; |
|||
optional int64 F_Int64_optional = 32; |
|||
optional fixed32 F_Fixed32_optional = 33; |
|||
optional fixed64 F_Fixed64_optional = 34; |
|||
optional uint32 F_Uint32_optional = 35; |
|||
optional uint64 F_Uint64_optional = 36; |
|||
optional float F_Float_optional = 37; |
|||
optional double F_Double_optional = 38; |
|||
optional string F_String_optional = 39; |
|||
optional bytes F_Bytes_optional = 301; |
|||
optional sint32 F_Sint32_optional = 302; |
|||
optional sint64 F_Sint64_optional = 303; |
|||
|
|||
// Default-valued fields of all basic types |
|||
optional bool F_Bool_defaulted = 40 [default=true]; |
|||
optional int32 F_Int32_defaulted = 41 [default=32]; |
|||
optional int64 F_Int64_defaulted = 42 [default=64]; |
|||
optional fixed32 F_Fixed32_defaulted = 43 [default=320]; |
|||
optional fixed64 F_Fixed64_defaulted = 44 [default=640]; |
|||
optional uint32 F_Uint32_defaulted = 45 [default=3200]; |
|||
optional uint64 F_Uint64_defaulted = 46 [default=6400]; |
|||
optional float F_Float_defaulted = 47 [default=314159.]; |
|||
optional double F_Double_defaulted = 48 [default=271828.]; |
|||
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"]; |
|||
optional bytes F_Bytes_defaulted = 401 [default="Bignose"]; |
|||
optional sint32 F_Sint32_defaulted = 402 [default = -32]; |
|||
optional sint64 F_Sint64_defaulted = 403 [default = -64]; |
|||
|
|||
// Packed repeated fields (no string or bytes). |
|||
repeated bool F_Bool_repeated_packed = 50 [packed=true]; |
|||
repeated int32 F_Int32_repeated_packed = 51 [packed=true]; |
|||
repeated int64 F_Int64_repeated_packed = 52 [packed=true]; |
|||
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true]; |
|||
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true]; |
|||
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true]; |
|||
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true]; |
|||
repeated float F_Float_repeated_packed = 57 [packed=true]; |
|||
repeated double F_Double_repeated_packed = 58 [packed=true]; |
|||
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true]; |
|||
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true]; |
|||
|
|||
// Required, repeated, and optional groups. |
|||
required group RequiredGroup = 70 { |
|||
required string RequiredField = 71; |
|||
}; |
|||
|
|||
repeated group RepeatedGroup = 80 { |
|||
required string RequiredField = 81; |
|||
}; |
|||
|
|||
optional group OptionalGroup = 90 { |
|||
required string RequiredField = 91; |
|||
}; |
|||
} |
|||
|
|||
// For testing a group containing a required field. |
|||
message GoTestRequiredGroupField { |
|||
required group Group = 1 { |
|||
required int32 Field = 2; |
|||
}; |
|||
} |
|||
|
|||
// For testing skipping of unrecognized fields. |
|||
// Numbers are all big, larger than tag numbers in GoTestField, |
|||
// the message used in the corresponding test. |
|||
message GoSkipTest { |
|||
required int32 skip_int32 = 11; |
|||
required fixed32 skip_fixed32 = 12; |
|||
required fixed64 skip_fixed64 = 13; |
|||
required string skip_string = 14; |
|||
required group SkipGroup = 15 { |
|||
required int32 group_int32 = 16; |
|||
required string group_string = 17; |
|||
} |
|||
} |
|||
|
|||
// For testing packed/non-packed decoder switching. |
|||
// A serialized instance of one should be deserializable as the other. |
|||
message NonPackedTest { |
|||
repeated int32 a = 1; |
|||
} |
|||
|
|||
message PackedTest { |
|||
repeated int32 b = 1 [packed=true]; |
|||
} |
|||
|
|||
message MaxTag { |
|||
// Maximum possible tag number. |
|||
optional string last_field = 536870911; |
|||
} |
|||
|
|||
message OldMessage { |
|||
message Nested { |
|||
optional string name = 1; |
|||
} |
|||
optional Nested nested = 1; |
|||
|
|||
optional int32 num = 2; |
|||
} |
|||
|
|||
// NewMessage is wire compatible with OldMessage; |
|||
// imagine it as a future version. |
|||
message NewMessage { |
|||
message Nested { |
|||
optional string name = 1; |
|||
optional string food_group = 2; |
|||
} |
|||
optional Nested nested = 1; |
|||
|
|||
// This is an int32 in OldMessage. |
|||
optional int64 num = 2; |
|||
} |
|||
|
|||
// Smaller tests for ASCII formatting. |
|||
|
|||
message InnerMessage { |
|||
required string host = 1; |
|||
optional int32 port = 2 [default=4000]; |
|||
optional bool connected = 3; |
|||
} |
|||
|
|||
message OtherMessage { |
|||
optional int64 key = 1; |
|||
optional bytes value = 2; |
|||
optional float weight = 3; |
|||
optional InnerMessage inner = 4; |
|||
|
|||
extensions 100 to max; |
|||
} |
|||
|
|||
message RequiredInnerMessage { |
|||
required InnerMessage leo_finally_won_an_oscar = 1; |
|||
} |
|||
|
|||
message MyMessage { |
|||
required int32 count = 1; |
|||
optional string name = 2; |
|||
optional string quote = 3; |
|||
repeated string pet = 4; |
|||
optional InnerMessage inner = 5; |
|||
repeated OtherMessage others = 6; |
|||
optional RequiredInnerMessage we_must_go_deeper = 13; |
|||
repeated InnerMessage rep_inner = 12; |
|||
|
|||
enum Color { |
|||
RED = 0; |
|||
GREEN = 1; |
|||
BLUE = 2; |
|||
}; |
|||
optional Color bikeshed = 7; |
|||
|
|||
optional group SomeGroup = 8 { |
|||
optional int32 group_field = 9; |
|||
} |
|||
|
|||
// This field becomes [][]byte in the generated code. |
|||
repeated bytes rep_bytes = 10; |
|||
|
|||
optional double bigfloat = 11; |
|||
|
|||
extensions 100 to max; |
|||
} |
|||
|
|||
message Ext { |
|||
extend MyMessage { |
|||
optional Ext more = 103; |
|||
optional string text = 104; |
|||
optional int32 number = 105; |
|||
} |
|||
|
|||
optional string data = 1; |
|||
} |
|||
|
|||
extend MyMessage { |
|||
repeated string greeting = 106; |
|||
} |
|||
|
|||
message ComplexExtension { |
|||
optional int32 first = 1; |
|||
optional int32 second = 2; |
|||
repeated int32 third = 3; |
|||
} |
|||
|
|||
extend OtherMessage { |
|||
optional ComplexExtension complex = 200; |
|||
repeated ComplexExtension r_complex = 201; |
|||
} |
|||
|
|||
message DefaultsMessage { |
|||
enum DefaultsEnum { |
|||
ZERO = 0; |
|||
ONE = 1; |
|||
TWO = 2; |
|||
}; |
|||
extensions 100 to max; |
|||
} |
|||
|
|||
extend DefaultsMessage { |
|||
optional double no_default_double = 101; |
|||
optional float no_default_float = 102; |
|||
optional int32 no_default_int32 = 103; |
|||
optional int64 no_default_int64 = 104; |
|||
optional uint32 no_default_uint32 = 105; |
|||
optional uint64 no_default_uint64 = 106; |
|||
optional sint32 no_default_sint32 = 107; |
|||
optional sint64 no_default_sint64 = 108; |
|||
optional fixed32 no_default_fixed32 = 109; |
|||
optional fixed64 no_default_fixed64 = 110; |
|||
optional sfixed32 no_default_sfixed32 = 111; |
|||
optional sfixed64 no_default_sfixed64 = 112; |
|||
optional bool no_default_bool = 113; |
|||
optional string no_default_string = 114; |
|||
optional bytes no_default_bytes = 115; |
|||
optional DefaultsMessage.DefaultsEnum no_default_enum = 116; |
|||
|
|||
optional double default_double = 201 [default = 3.1415]; |
|||
optional float default_float = 202 [default = 3.14]; |
|||
optional int32 default_int32 = 203 [default = 42]; |
|||
optional int64 default_int64 = 204 [default = 43]; |
|||
optional uint32 default_uint32 = 205 [default = 44]; |
|||
optional uint64 default_uint64 = 206 [default = 45]; |
|||
optional sint32 default_sint32 = 207 [default = 46]; |
|||
optional sint64 default_sint64 = 208 [default = 47]; |
|||
optional fixed32 default_fixed32 = 209 [default = 48]; |
|||
optional fixed64 default_fixed64 = 210 [default = 49]; |
|||
optional sfixed32 default_sfixed32 = 211 [default = 50]; |
|||
optional sfixed64 default_sfixed64 = 212 [default = 51]; |
|||
optional bool default_bool = 213 [default = true]; |
|||
optional string default_string = 214 [default = "Hello, string"]; |
|||
optional bytes default_bytes = 215 [default = "Hello, bytes"]; |
|||
optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE]; |
|||
} |
|||
|
|||
message MyMessageSet { |
|||
option message_set_wire_format = true; |
|||
extensions 100 to max; |
|||
} |
|||
|
|||
message Empty { |
|||
} |
|||
|
|||
extend MyMessageSet { |
|||
optional Empty x201 = 201; |
|||
optional Empty x202 = 202; |
|||
optional Empty x203 = 203; |
|||
optional Empty x204 = 204; |
|||
optional Empty x205 = 205; |
|||
optional Empty x206 = 206; |
|||
optional Empty x207 = 207; |
|||
optional Empty x208 = 208; |
|||
optional Empty x209 = 209; |
|||
optional Empty x210 = 210; |
|||
optional Empty x211 = 211; |
|||
optional Empty x212 = 212; |
|||
optional Empty x213 = 213; |
|||
optional Empty x214 = 214; |
|||
optional Empty x215 = 215; |
|||
optional Empty x216 = 216; |
|||
optional Empty x217 = 217; |
|||
optional Empty x218 = 218; |
|||
optional Empty x219 = 219; |
|||
optional Empty x220 = 220; |
|||
optional Empty x221 = 221; |
|||
optional Empty x222 = 222; |
|||
optional Empty x223 = 223; |
|||
optional Empty x224 = 224; |
|||
optional Empty x225 = 225; |
|||
optional Empty x226 = 226; |
|||
optional Empty x227 = 227; |
|||
optional Empty x228 = 228; |
|||
optional Empty x229 = 229; |
|||
optional Empty x230 = 230; |
|||
optional Empty x231 = 231; |
|||
optional Empty x232 = 232; |
|||
optional Empty x233 = 233; |
|||
optional Empty x234 = 234; |
|||
optional Empty x235 = 235; |
|||
optional Empty x236 = 236; |
|||
optional Empty x237 = 237; |
|||
optional Empty x238 = 238; |
|||
optional Empty x239 = 239; |
|||
optional Empty x240 = 240; |
|||
optional Empty x241 = 241; |
|||
optional Empty x242 = 242; |
|||
optional Empty x243 = 243; |
|||
optional Empty x244 = 244; |
|||
optional Empty x245 = 245; |
|||
optional Empty x246 = 246; |
|||
optional Empty x247 = 247; |
|||
optional Empty x248 = 248; |
|||
optional Empty x249 = 249; |
|||
optional Empty x250 = 250; |
|||
} |
|||
|
|||
message MessageList { |
|||
repeated group Message = 1 { |
|||
required string name = 2; |
|||
required int32 count = 3; |
|||
} |
|||
} |
|||
|
|||
message Strings { |
|||
optional string string_field = 1; |
|||
optional bytes bytes_field = 2; |
|||
} |
|||
|
|||
message Defaults { |
|||
enum Color { |
|||
RED = 0; |
|||
GREEN = 1; |
|||
BLUE = 2; |
|||
} |
|||
|
|||
// Default-valued fields of all basic types. |
|||
// Same as GoTest, but copied here to make testing easier. |
|||
optional bool F_Bool = 1 [default=true]; |
|||
optional int32 F_Int32 = 2 [default=32]; |
|||
optional int64 F_Int64 = 3 [default=64]; |
|||
optional fixed32 F_Fixed32 = 4 [default=320]; |
|||
optional fixed64 F_Fixed64 = 5 [default=640]; |
|||
optional uint32 F_Uint32 = 6 [default=3200]; |
|||
optional uint64 F_Uint64 = 7 [default=6400]; |
|||
optional float F_Float = 8 [default=314159.]; |
|||
optional double F_Double = 9 [default=271828.]; |
|||
optional string F_String = 10 [default="hello, \"world!\"\n"]; |
|||
optional bytes F_Bytes = 11 [default="Bignose"]; |
|||
optional sint32 F_Sint32 = 12 [default=-32]; |
|||
optional sint64 F_Sint64 = 13 [default=-64]; |
|||
optional Color F_Enum = 14 [default=GREEN]; |
|||
|
|||
// More fields with crazy defaults. |
|||
optional float F_Pinf = 15 [default=inf]; |
|||
optional float F_Ninf = 16 [default=-inf]; |
|||
optional float F_Nan = 17 [default=nan]; |
|||
|
|||
// Sub-message. |
|||
optional SubDefaults sub = 18; |
|||
|
|||
// Redundant but explicit defaults. |
|||
optional string str_zero = 19 [default=""]; |
|||
} |
|||
|
|||
message SubDefaults { |
|||
optional int64 n = 1 [default=7]; |
|||
} |
|||
|
|||
message RepeatedEnum { |
|||
enum Color { |
|||
RED = 1; |
|||
} |
|||
repeated Color color = 1; |
|||
} |
|||
|
|||
message MoreRepeated { |
|||
repeated bool bools = 1; |
|||
repeated bool bools_packed = 2 [packed=true]; |
|||
repeated int32 ints = 3; |
|||
repeated int32 ints_packed = 4 [packed=true]; |
|||
repeated int64 int64s_packed = 7 [packed=true]; |
|||
repeated string strings = 5; |
|||
repeated fixed32 fixeds = 6; |
|||
} |
|||
|
|||
// GroupOld and GroupNew have the same wire format. |
|||
// GroupNew has a new field inside a group. |
|||
|
|||
message GroupOld { |
|||
optional group G = 101 { |
|||
optional int32 x = 2; |
|||
} |
|||
} |
|||
|
|||
message GroupNew { |
|||
optional group G = 101 { |
|||
optional int32 x = 2; |
|||
optional int32 y = 3; |
|||
} |
|||
} |
|||
|
|||
message FloatingPoint { |
|||
required double f = 1; |
|||
optional bool exact = 2; |
|||
} |
|||
|
|||
message MessageWithMap { |
|||
map<int32, string> name_mapping = 1; |
|||
map<sint64, FloatingPoint> msg_mapping = 2; |
|||
map<bool, bytes> byte_mapping = 3; |
|||
map<string, string> str_to_str = 4; |
|||
} |
|||
|
|||
message Oneof { |
|||
oneof union { |
|||
bool F_Bool = 1; |
|||
int32 F_Int32 = 2; |
|||
int64 F_Int64 = 3; |
|||
fixed32 F_Fixed32 = 4; |
|||
fixed64 F_Fixed64 = 5; |
|||
uint32 F_Uint32 = 6; |
|||
uint64 F_Uint64 = 7; |
|||
float F_Float = 8; |
|||
double F_Double = 9; |
|||
string F_String = 10; |
|||
bytes F_Bytes = 11; |
|||
sint32 F_Sint32 = 12; |
|||
sint64 F_Sint64 = 13; |
|||
MyMessage.Color F_Enum = 14; |
|||
GoTestField F_Message = 15; |
|||
group F_Group = 16 { |
|||
optional int32 x = 17; |
|||
} |
|||
int32 F_Largest_Tag = 536870911; |
|||
} |
|||
|
|||
oneof tormato { |
|||
int32 value = 100; |
|||
} |
|||
} |
|||
|
|||
message Communique { |
|||
optional bool make_me_cry = 1; |
|||
|
|||
// This is a oneof, called "union". |
|||
oneof union { |
|||
int32 number = 5; |
|||
string name = 6; |
|||
bytes data = 7; |
|||
double temp_c = 8; |
|||
MyMessage.Color col = 9; |
|||
Strings msg = 10; |
|||
} |
|||
} |
|||
@ -0,0 +1,854 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
// Functions for writing the text protocol buffer format.
|
|||
|
|||
import ( |
|||
"bufio" |
|||
"bytes" |
|||
"encoding" |
|||
"errors" |
|||
"fmt" |
|||
"io" |
|||
"log" |
|||
"math" |
|||
"reflect" |
|||
"sort" |
|||
"strings" |
|||
) |
|||
|
|||
var ( |
|||
newline = []byte("\n") |
|||
spaces = []byte(" ") |
|||
gtNewline = []byte(">\n") |
|||
endBraceNewline = []byte("}\n") |
|||
backslashN = []byte{'\\', 'n'} |
|||
backslashR = []byte{'\\', 'r'} |
|||
backslashT = []byte{'\\', 't'} |
|||
backslashDQ = []byte{'\\', '"'} |
|||
backslashBS = []byte{'\\', '\\'} |
|||
posInf = []byte("inf") |
|||
negInf = []byte("-inf") |
|||
nan = []byte("nan") |
|||
) |
|||
|
|||
type writer interface { |
|||
io.Writer |
|||
WriteByte(byte) error |
|||
} |
|||
|
|||
// textWriter is an io.Writer that tracks its indentation level.
|
|||
type textWriter struct { |
|||
ind int |
|||
complete bool // if the current position is a complete line
|
|||
compact bool // whether to write out as a one-liner
|
|||
w writer |
|||
} |
|||
|
|||
func (w *textWriter) WriteString(s string) (n int, err error) { |
|||
if !strings.Contains(s, "\n") { |
|||
if !w.compact && w.complete { |
|||
w.writeIndent() |
|||
} |
|||
w.complete = false |
|||
return io.WriteString(w.w, s) |
|||
} |
|||
// WriteString is typically called without newlines, so this
|
|||
// codepath and its copy are rare. We copy to avoid
|
|||
// duplicating all of Write's logic here.
|
|||
return w.Write([]byte(s)) |
|||
} |
|||
|
|||
func (w *textWriter) Write(p []byte) (n int, err error) { |
|||
newlines := bytes.Count(p, newline) |
|||
if newlines == 0 { |
|||
if !w.compact && w.complete { |
|||
w.writeIndent() |
|||
} |
|||
n, err = w.w.Write(p) |
|||
w.complete = false |
|||
return n, err |
|||
} |
|||
|
|||
frags := bytes.SplitN(p, newline, newlines+1) |
|||
if w.compact { |
|||
for i, frag := range frags { |
|||
if i > 0 { |
|||
if err := w.w.WriteByte(' '); err != nil { |
|||
return n, err |
|||
} |
|||
n++ |
|||
} |
|||
nn, err := w.w.Write(frag) |
|||
n += nn |
|||
if err != nil { |
|||
return n, err |
|||
} |
|||
} |
|||
return n, nil |
|||
} |
|||
|
|||
for i, frag := range frags { |
|||
if w.complete { |
|||
w.writeIndent() |
|||
} |
|||
nn, err := w.w.Write(frag) |
|||
n += nn |
|||
if err != nil { |
|||
return n, err |
|||
} |
|||
if i+1 < len(frags) { |
|||
if err := w.w.WriteByte('\n'); err != nil { |
|||
return n, err |
|||
} |
|||
n++ |
|||
} |
|||
} |
|||
w.complete = len(frags[len(frags)-1]) == 0 |
|||
return n, nil |
|||
} |
|||
|
|||
func (w *textWriter) WriteByte(c byte) error { |
|||
if w.compact && c == '\n' { |
|||
c = ' ' |
|||
} |
|||
if !w.compact && w.complete { |
|||
w.writeIndent() |
|||
} |
|||
err := w.w.WriteByte(c) |
|||
w.complete = c == '\n' |
|||
return err |
|||
} |
|||
|
|||
func (w *textWriter) indent() { w.ind++ } |
|||
|
|||
func (w *textWriter) unindent() { |
|||
if w.ind == 0 { |
|||
log.Print("proto: textWriter unindented too far") |
|||
return |
|||
} |
|||
w.ind-- |
|||
} |
|||
|
|||
func writeName(w *textWriter, props *Properties) error { |
|||
if _, err := w.WriteString(props.OrigName); err != nil { |
|||
return err |
|||
} |
|||
if props.Wire != "group" { |
|||
return w.WriteByte(':') |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// raw is the interface satisfied by RawMessage.
|
|||
type raw interface { |
|||
Bytes() []byte |
|||
} |
|||
|
|||
func requiresQuotes(u string) bool { |
|||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
|||
for _, ch := range u { |
|||
switch { |
|||
case ch == '.' || ch == '/' || ch == '_': |
|||
continue |
|||
case '0' <= ch && ch <= '9': |
|||
continue |
|||
case 'A' <= ch && ch <= 'Z': |
|||
continue |
|||
case 'a' <= ch && ch <= 'z': |
|||
continue |
|||
default: |
|||
return true |
|||
} |
|||
} |
|||
return false |
|||
} |
|||
|
|||
// isAny reports whether sv is a google.protobuf.Any message
|
|||
func isAny(sv reflect.Value) bool { |
|||
type wkt interface { |
|||
XXX_WellKnownType() string |
|||
} |
|||
t, ok := sv.Addr().Interface().(wkt) |
|||
return ok && t.XXX_WellKnownType() == "Any" |
|||
} |
|||
|
|||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
|||
//
|
|||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
|||
// required messages are not linked in).
|
|||
//
|
|||
// It returns (true, error) when sv was written in expanded format or an error
|
|||
// was encountered.
|
|||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { |
|||
turl := sv.FieldByName("TypeUrl") |
|||
val := sv.FieldByName("Value") |
|||
if !turl.IsValid() || !val.IsValid() { |
|||
return true, errors.New("proto: invalid google.protobuf.Any message") |
|||
} |
|||
|
|||
b, ok := val.Interface().([]byte) |
|||
if !ok { |
|||
return true, errors.New("proto: invalid google.protobuf.Any message") |
|||
} |
|||
|
|||
parts := strings.Split(turl.String(), "/") |
|||
mt := MessageType(parts[len(parts)-1]) |
|||
if mt == nil { |
|||
return false, nil |
|||
} |
|||
m := reflect.New(mt.Elem()) |
|||
if err := Unmarshal(b, m.Interface().(Message)); err != nil { |
|||
return false, nil |
|||
} |
|||
w.Write([]byte("[")) |
|||
u := turl.String() |
|||
if requiresQuotes(u) { |
|||
writeString(w, u) |
|||
} else { |
|||
w.Write([]byte(u)) |
|||
} |
|||
if w.compact { |
|||
w.Write([]byte("]:<")) |
|||
} else { |
|||
w.Write([]byte("]: <\n")) |
|||
w.ind++ |
|||
} |
|||
if err := tm.writeStruct(w, m.Elem()); err != nil { |
|||
return true, err |
|||
} |
|||
if w.compact { |
|||
w.Write([]byte("> ")) |
|||
} else { |
|||
w.ind-- |
|||
w.Write([]byte(">\n")) |
|||
} |
|||
return true, nil |
|||
} |
|||
|
|||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { |
|||
if tm.ExpandAny && isAny(sv) { |
|||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand { |
|||
return err |
|||
} |
|||
} |
|||
st := sv.Type() |
|||
sprops := GetProperties(st) |
|||
for i := 0; i < sv.NumField(); i++ { |
|||
fv := sv.Field(i) |
|||
props := sprops.Prop[i] |
|||
name := st.Field(i).Name |
|||
|
|||
if strings.HasPrefix(name, "XXX_") { |
|||
// There are two XXX_ fields:
|
|||
// XXX_unrecognized []byte
|
|||
// XXX_extensions map[int32]proto.Extension
|
|||
// The first is handled here;
|
|||
// the second is handled at the bottom of this function.
|
|||
if name == "XXX_unrecognized" && !fv.IsNil() { |
|||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
continue |
|||
} |
|||
if fv.Kind() == reflect.Ptr && fv.IsNil() { |
|||
// Field not filled in. This could be an optional field or
|
|||
// a required field that wasn't filled in. Either way, there
|
|||
// isn't anything we can show for it.
|
|||
continue |
|||
} |
|||
if fv.Kind() == reflect.Slice && fv.IsNil() { |
|||
// Repeated field that is empty, or a bytes field that is unused.
|
|||
continue |
|||
} |
|||
|
|||
if props.Repeated && fv.Kind() == reflect.Slice { |
|||
// Repeated field.
|
|||
for j := 0; j < fv.Len(); j++ { |
|||
if err := writeName(w, props); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
v := fv.Index(j) |
|||
if v.Kind() == reflect.Ptr && v.IsNil() { |
|||
// A nil message in a repeated field is not valid,
|
|||
// but we can handle that more gracefully than panicking.
|
|||
if _, err := w.Write([]byte("<nil>\n")); err != nil { |
|||
return err |
|||
} |
|||
continue |
|||
} |
|||
if err := tm.writeAny(w, v, props); err != nil { |
|||
return err |
|||
} |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
continue |
|||
} |
|||
if fv.Kind() == reflect.Map { |
|||
// Map fields are rendered as a repeated struct with key/value fields.
|
|||
keys := fv.MapKeys() |
|||
sort.Sort(mapKeys(keys)) |
|||
for _, key := range keys { |
|||
val := fv.MapIndex(key) |
|||
if err := writeName(w, props); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
// open struct
|
|||
if err := w.WriteByte('<'); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
w.indent() |
|||
// key
|
|||
if _, err := w.WriteString("key:"); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil { |
|||
return err |
|||
} |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
// nil values aren't legal, but we can avoid panicking because of them.
|
|||
if val.Kind() != reflect.Ptr || !val.IsNil() { |
|||
// value
|
|||
if _, err := w.WriteString("value:"); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
if err := tm.writeAny(w, val, props.mvalprop); err != nil { |
|||
return err |
|||
} |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
// close struct
|
|||
w.unindent() |
|||
if err := w.WriteByte('>'); err != nil { |
|||
return err |
|||
} |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
continue |
|||
} |
|||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { |
|||
// empty bytes field
|
|||
continue |
|||
} |
|||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { |
|||
// proto3 non-repeated scalar field; skip if zero value
|
|||
if isProto3Zero(fv) { |
|||
continue |
|||
} |
|||
} |
|||
|
|||
if fv.Kind() == reflect.Interface { |
|||
// Check if it is a oneof.
|
|||
if st.Field(i).Tag.Get("protobuf_oneof") != "" { |
|||
// fv is nil, or holds a pointer to generated struct.
|
|||
// That generated struct has exactly one field,
|
|||
// which has a protobuf struct tag.
|
|||
if fv.IsNil() { |
|||
continue |
|||
} |
|||
inner := fv.Elem().Elem() // interface -> *T -> T
|
|||
tag := inner.Type().Field(0).Tag.Get("protobuf") |
|||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
|||
props.Parse(tag) |
|||
// Write the value in the oneof, not the oneof itself.
|
|||
fv = inner.Field(0) |
|||
|
|||
// Special case to cope with malformed messages gracefully:
|
|||
// If the value in the oneof is a nil pointer, don't panic
|
|||
// in writeAny.
|
|||
if fv.Kind() == reflect.Ptr && fv.IsNil() { |
|||
// Use errors.New so writeAny won't render quotes.
|
|||
msg := errors.New("/* nil */") |
|||
fv = reflect.ValueOf(&msg).Elem() |
|||
} |
|||
} |
|||
} |
|||
|
|||
if err := writeName(w, props); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
if b, ok := fv.Interface().(raw); ok { |
|||
if err := writeRaw(w, b.Bytes()); err != nil { |
|||
return err |
|||
} |
|||
continue |
|||
} |
|||
|
|||
// Enums have a String method, so writeAny will work fine.
|
|||
if err := tm.writeAny(w, fv, props); err != nil { |
|||
return err |
|||
} |
|||
|
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
// Extensions (the XXX_extensions field).
|
|||
pv := sv.Addr() |
|||
if _, ok := extendable(pv.Interface()); ok { |
|||
if err := tm.writeExtensions(w, pv); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
|
|||
return nil |
|||
} |
|||
|
|||
// writeRaw writes an uninterpreted raw message.
|
|||
func writeRaw(w *textWriter, b []byte) error { |
|||
if err := w.WriteByte('<'); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
w.indent() |
|||
if err := writeUnknownStruct(w, b); err != nil { |
|||
return err |
|||
} |
|||
w.unindent() |
|||
if err := w.WriteByte('>'); err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// writeAny writes an arbitrary field value v in text format.
// props may be nil; it is consulted only to decide group ({}) versus
// message (<>) bracketing for struct values. Pointers are dereferenced
// first, and floats get special rendering for Inf/NaN.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
	v = reflect.Indirect(v)

	// Floats have special cases.
	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
		x := v.Float()
		var b []byte
		switch {
		case math.IsInf(x, 1):
			b = posInf
		case math.IsInf(x, -1):
			b = negInf
		case math.IsNaN(x):
			b = nan
		}
		if b != nil {
			_, err := w.Write(b)
			return err
		}
		// Other values are handled below.
	}

	// We don't attempt to serialise every possible value type; only those
	// that can occur in protocol buffers.
	switch v.Kind() {
	case reflect.Slice:
		// Should only be a []byte; repeated fields are handled in writeStruct.
		if err := writeString(w, string(v.Bytes())); err != nil {
			return err
		}
	case reflect.String:
		if err := writeString(w, v.String()); err != nil {
			return err
		}
	case reflect.Struct:
		// Required/optional group/message.
		// Groups use {} brackets; ordinary messages use <>.
		var bra, ket byte = '<', '>'
		if props != nil && props.Wire == "group" {
			bra, ket = '{', '}'
		}
		if err := w.WriteByte(bra); err != nil {
			return err
		}
		if !w.compact {
			if err := w.WriteByte('\n'); err != nil {
				return err
			}
		}
		w.indent()
		// A type with its own TextMarshaler takes precedence over
		// the generic struct walk.
		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
			text, err := etm.MarshalText()
			if err != nil {
				return err
			}
			if _, err = w.Write(text); err != nil {
				return err
			}
		} else if err := tm.writeStruct(w, v); err != nil {
			return err
		}
		w.unindent()
		if err := w.WriteByte(ket); err != nil {
			return err
		}
	default:
		// Scalars (ints, bools, enums with a String method) fall through
		// to fmt's default formatting.
		_, err := fmt.Fprint(w, v.Interface())
		return err
	}
	return nil
}
|||
|
|||
// isprint reports whether c is a printable ASCII character
// (the equivalent of C's isprint).
func isprint(c byte) bool {
	return 0x20 <= c && c <= 0x7e
}
|||
|
|||
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
	// use WriteByte here to get any needed indent
	if err := w.WriteByte('"'); err != nil {
		return err
	}
	// Subsequent writes go straight to the underlying writer (w.w):
	// the indent, if any, was already emitted with the opening quote.
	// Loop over the bytes, not the runes.
	for i := 0; i < len(s); i++ {
		var err error
		// Divergence from C++: we don't escape apostrophes.
		// There's no need to escape them, and the C++ parser
		// copes with a naked apostrophe.
		switch c := s[i]; c {
		case '\n':
			_, err = w.w.Write(backslashN)
		case '\r':
			_, err = w.w.Write(backslashR)
		case '\t':
			_, err = w.w.Write(backslashT)
		case '"':
			_, err = w.w.Write(backslashDQ)
		case '\\':
			_, err = w.w.Write(backslashBS)
		default:
			if isprint(c) {
				err = w.w.WriteByte(c)
			} else {
				// Non-printable bytes become three-digit octal escapes.
				_, err = fmt.Fprintf(w.w, "\\%03o", c)
			}
		}
		if err != nil {
			return err
		}
	}
	return w.WriteByte('"')
}
|||
|
|||
// writeUnknownStruct renders raw, undecoded protobuf bytes as best-effort
// text: each field appears as "tag: value" (or "tag { ... }" for groups),
// with decode failures emitted as /* ... */ comments rather than aborting.
// The only errors returned are from the writer.
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
	if !w.compact {
		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
			return err
		}
	}
	b := NewBuffer(data)
	for b.index < len(b.buf) {
		// NOTE: x and err here are loop-local and shadow the named
		// return value; the switch below reassigns these same
		// variables, so the error check after the switch sees them.
		x, err := b.DecodeVarint()
		if err != nil {
			_, err := fmt.Fprintf(w, "/* %v */\n", err)
			return err
		}
		wire, tag := x&7, x>>3
		if wire == WireEndGroup {
			w.unindent()
			if _, err := w.Write(endBraceNewline); err != nil {
				return err
			}
			continue
		}
		if _, err := fmt.Fprint(w, tag); err != nil {
			return err
		}
		// Group starts print as "tag { ... }" with no colon.
		if wire != WireStartGroup {
			if err := w.WriteByte(':'); err != nil {
				return err
			}
		}
		if !w.compact || wire == WireStartGroup {
			if err := w.WriteByte(' '); err != nil {
				return err
			}
		}
		switch wire {
		case WireBytes:
			buf, e := b.DecodeRawBytes(false)
			if e == nil {
				_, err = fmt.Fprintf(w, "%q", buf)
			} else {
				_, err = fmt.Fprintf(w, "/* %v */", e)
			}
		case WireFixed32:
			x, err = b.DecodeFixed32()
			err = writeUnknownInt(w, x, err)
		case WireFixed64:
			x, err = b.DecodeFixed64()
			err = writeUnknownInt(w, x, err)
		case WireStartGroup:
			err = w.WriteByte('{')
			w.indent()
		case WireVarint:
			x, err = b.DecodeVarint()
			err = writeUnknownInt(w, x, err)
		default:
			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
		}
		if err != nil {
			return err
		}
		if err = w.WriteByte('\n'); err != nil {
			return err
		}
	}
	return nil
}
|||
|
|||
func writeUnknownInt(w *textWriter, x uint64, err error) error { |
|||
if err == nil { |
|||
_, err = fmt.Fprint(w, x) |
|||
} else { |
|||
_, err = fmt.Fprintf(w, "/* %v */", err) |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// int32Slice attaches sort.Interface to a []int32 so extension IDs can
// be ordered canonically.
type int32Slice []int32

func (s int32Slice) Len() int {
	return len(s)
}

func (s int32Slice) Less(i, j int) bool {
	return s[i] < s[j]
}

func (s int32Slice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
|||
|
|||
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
	emap := extensionMaps[pv.Type().Elem()]
	ep, _ := extendable(pv.Interface())

	// Order the extensions by ID.
	// This isn't strictly necessary, but it will give us
	// canonical output, which will also make testing easier.
	m, mu := ep.extensionsRead()
	if m == nil {
		return nil
	}
	// The lock is held only while snapshotting the key set.
	mu.Lock()
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids))
	mu.Unlock()

	for _, extNum := range ids {
		// NOTE(review): m is read here outside the lock — presumably
		// safe because marshaling is not expected to race with
		// extension mutation; confirm against the extensions code.
		ext := m[extNum]
		var desc *ExtensionDesc
		if emap != nil {
			desc = emap[extNum]
		}
		if desc == nil {
			// Unknown extension: dump its raw encoding.
			if err := writeUnknownStruct(w, ext.enc); err != nil {
				return err
			}
			continue
		}

		pb, err := GetExtension(ep, desc)
		if err != nil {
			return fmt.Errorf("failed getting extension: %v", err)
		}

		// Repeated extensions will appear as a slice.
		if !desc.repeated() {
			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
				return err
			}
		} else {
			v := reflect.ValueOf(pb)
			for i := 0; i < v.Len(); i++ {
				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
|||
|
|||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { |
|||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { |
|||
return err |
|||
} |
|||
if !w.compact { |
|||
if err := w.WriteByte(' '); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { |
|||
return err |
|||
} |
|||
if err := w.WriteByte('\n'); err != nil { |
|||
return err |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (w *textWriter) writeIndent() { |
|||
if !w.complete { |
|||
return |
|||
} |
|||
remain := w.ind * 2 |
|||
for remain > 0 { |
|||
n := remain |
|||
if n > len(spaces) { |
|||
n = len(spaces) |
|||
} |
|||
w.w.Write(spaces[:n]) |
|||
remain -= n |
|||
} |
|||
w.complete = false |
|||
} |
|||
|
|||
// TextMarshaler is a configurable text format marshaler.
// The zero value produces the default multi-line text format.
type TextMarshaler struct {
	Compact   bool // use compact text format (one line).
	ExpandAny bool // expand google.protobuf.Any messages of known types
}
|||
|
|||
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
// A nil message is written as the literal "<nil>".
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
	val := reflect.ValueOf(pb)
	if pb == nil || val.IsNil() {
		w.Write([]byte("<nil>"))
		return nil
	}
	// If w doesn't already support byte-level writes, wrap it in a
	// bufio.Writer; bw is remembered so it can be flushed before return.
	var bw *bufio.Writer
	ww, ok := w.(writer)
	if !ok {
		bw = bufio.NewWriter(w)
		ww = bw
	}
	aw := &textWriter{
		w:        ww,
		complete: true,
		compact:  tm.Compact,
	}

	// A message with its own TextMarshaler renders itself verbatim.
	if etm, ok := pb.(encoding.TextMarshaler); ok {
		text, err := etm.MarshalText()
		if err != nil {
			return err
		}
		if _, err = aw.Write(text); err != nil {
			return err
		}
		if bw != nil {
			return bw.Flush()
		}
		return nil
	}
	// Dereference the received pointer so we don't have outer < and >.
	v := reflect.Indirect(val)
	if err := tm.writeStruct(aw, v); err != nil {
		return err
	}
	if bw != nil {
		return bw.Flush()
	}
	return nil
}
|||
|
|||
// Text is the same as Marshal, but returns the string directly.
|
|||
func (tm *TextMarshaler) Text(pb Message) string { |
|||
var buf bytes.Buffer |
|||
tm.Marshal(&buf, pb) |
|||
return buf.String() |
|||
} |
|||
|
|||
// Package-level marshalers backing the MarshalText/CompactText helpers below.
var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)
|||
|
|||
// TODO: consider removing some of the Marshal functions below.

// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }

// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }

// CompactText writes a given protocol buffer in compact text format (one line).
// The only errors returned are from w.
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }

// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
|||
@ -0,0 +1,891 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto |
|||
|
|||
// Functions for parsing the Text protocol buffer format.
|
|||
// TODO: message sets.
|
|||
|
|||
import ( |
|||
"encoding" |
|||
"errors" |
|||
"fmt" |
|||
"reflect" |
|||
"strconv" |
|||
"strings" |
|||
"unicode/utf8" |
|||
) |
|||
|
|||
// anyRepeatedlyUnpacked is the error format emitted when deserializing an
// expanded Any and the named field ("type_url" or "value") is already set.
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|||
|
|||
// ParseError reports a failure to parse text-format input, locating the
// problem by line and, on the first line only, by byte offset.
type ParseError struct {
	Message string
	Line    int // 1-based line number
	Offset  int // 0-based byte offset from start of input
}

// Error formats the parse error with its position.
func (p *ParseError) Error() string {
	if p.Line != 1 {
		return fmt.Sprintf("line %d: %v", p.Line, p.Message)
	}
	// The byte offset is only shown for errors on the first line.
	return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
|||
|
|||
// token is a single lexical element of the text format, carrying either
// its value and input position, or the parse error that produced it.
type token struct {
	value    string
	err      *ParseError
	line     int    // line number
	offset   int    // byte number from start of input, not start of line
	unquoted string // the unquoted version of value, if it was a quoted string
}
|||
|
|||
func (t *token) String() string { |
|||
if t.err == nil { |
|||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) |
|||
} |
|||
return fmt.Sprintf("parse error: %v", t.err) |
|||
} |
|||
|
|||
// textParser is a tokenizer and recursive-descent parser over a
// text-format input string. It tracks line/offset for error reporting
// and supports one token of lookahead via back().
type textParser struct {
	s            string // remaining input
	done         bool   // whether the parsing is finished (success or error)
	backed       bool   // whether back() was called
	offset, line int
	cur          token
}
|||
|
|||
func newTextParser(s string) *textParser { |
|||
p := new(textParser) |
|||
p.s = s |
|||
p.line = 1 |
|||
p.cur.line = 1 |
|||
return p |
|||
} |
|||
|
|||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError { |
|||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} |
|||
p.cur.err = pe |
|||
p.done = true |
|||
return pe |
|||
} |
|||
|
|||
// isIdentOrNumberChar reports whether c may appear in an identifier or
// number token, i.e. whether it matches [-+._A-Za-z0-9].
func isIdentOrNumberChar(c byte) bool {
	switch c {
	case '-', '+', '.', '_':
		return true
	}
	return ('A' <= c && c <= 'Z') ||
		('a' <= c && c <= 'z') ||
		('0' <= c && c <= '9')
}
|||
|
|||
// isWhitespace reports whether c is a space, tab, or line-break byte.
func isWhitespace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
|||
|
|||
// isQuote reports whether c opens a quoted string: a single or double quote.
func isQuote(c byte) bool {
	return c == '"' || c == '\''
}
|||
|
|||
// skipWhitespace advances past whitespace and '#' line comments, keeping
// the line counter and byte offset up to date. It sets p.done when the
// input is exhausted.
func (p *textParser) skipWhitespace() {
	i := 0
	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
		if p.s[i] == '#' {
			// comment; skip to end of line or input
			for i < len(p.s) && p.s[i] != '\n' {
				i++
			}
			if i == len(p.s) {
				break
			}
		}
		// The '\n' that ends a comment is counted here, since the
		// inner loop above stops just before it.
		if p.s[i] == '\n' {
			p.line++
		}
		i++
	}
	p.offset += i
	p.s = p.s[i:len(p.s)]
	if len(p.s) == 0 {
		p.done = true
	}
}
|||
|
|||
// advance reads the next token from the input into p.cur: a single
// punctuation symbol, a quoted string (with its unquoted form cached in
// p.cur.unquoted), or an identifier/number. Lexing failures are recorded
// via p.errorf, which marks the parser done.
func (p *textParser) advance() {
	// Skip whitespace
	p.skipWhitespace()
	if p.done {
		return
	}

	// Start of non-whitespace
	p.cur.err = nil
	p.cur.offset, p.cur.line = p.offset, p.line
	p.cur.unquoted = ""
	switch p.s[0] {
	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
		// Single symbol
		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
	case '"', '\'':
		// Quoted string; scan to the matching quote, honoring
		// backslash escapes, without crossing a newline.
		i := 1
		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
			if p.s[i] == '\\' && i+1 < len(p.s) {
				// skip escaped char
				i++
			}
			i++
		}
		if i >= len(p.s) || p.s[i] != p.s[0] {
			p.errorf("unmatched quote")
			return
		}
		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
		if err != nil {
			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
			return
		}
		// value keeps the surrounding quotes; unquoted is the decoded form.
		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
		p.cur.unquoted = unq
	default:
		i := 0
		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
			i++
		}
		if i == 0 {
			p.errorf("unexpected byte %#x", p.s[0])
			return
		}
		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
	}
	p.offset += len(p.cur.value)
}
|||
|
|||
// Sentinel errors returned by the unquoting routines below.
var (
	errBadUTF8 = errors.New("proto: bad UTF-8")
	errBadHex  = errors.New("proto: bad hexadecimal")
)
|||
|
|||
// unquoteC decodes the escaped body of a quoted string token (without
// its surrounding quotes, which must be the given quote rune) and
// returns the unescaped text.
func unquoteC(s string, quote rune) (string, error) {
	// This is based on C++'s tokenizer.cc.
	// Despite its name, this is *not* parsing C syntax.
	// For instance, "\0" is an invalid quoted string.

	// Avoid allocation in trivial cases.
	simple := true
	for _, r := range s {
		if r == '\\' || r == quote {
			simple = false
			break
		}
	}
	if simple {
		return s, nil
	}

	buf := make([]byte, 0, 3*len(s)/2)
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if r == utf8.RuneError && n == 1 {
			return "", errBadUTF8
		}
		s = s[n:]
		if r != '\\' {
			// Ordinary rune: copy it through, re-encoding
			// multi-byte runes.
			if r < utf8.RuneSelf {
				buf = append(buf, byte(r))
			} else {
				buf = append(buf, string(r)...)
			}
			continue
		}

		// Backslash: delegate the escape sequence to unescape.
		ch, tail, err := unescape(s)
		if err != nil {
			return "", err
		}
		buf = append(buf, ch...)
		s = tail
	}
	return string(buf), nil
}
|||
|
|||
// unescape decodes the single escape sequence at the start of s (the
// leading backslash has already been consumed by the caller) and returns
// the decoded bytes along with the unconsumed tail of s.
func unescape(s string) (ch string, tail string, err error) {
	r, n := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError && n == 1 {
		return "", "", errBadUTF8
	}
	s = s[n:]
	switch r {
	case 'a':
		return "\a", s, nil
	case 'b':
		return "\b", s, nil
	case 'f':
		return "\f", s, nil
	case 'n':
		return "\n", s, nil
	case 'r':
		return "\r", s, nil
	case 't':
		return "\t", s, nil
	case 'v':
		return "\v", s, nil
	case '?':
		return "?", s, nil // trigraph workaround
	case '\'', '"', '\\':
		return string(r), s, nil
	case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
		// Octal escapes are exactly three digits here (the leading
		// digit plus two more); hex escapes are \x or \X plus two
		// hex digits.
		if len(s) < 2 {
			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
		}
		base := 8
		ss := s[:2]
		s = s[2:]
		if r == 'x' || r == 'X' {
			base = 16
		} else {
			ss = string(r) + ss
		}
		// bitSize 8 makes ParseUint reject values above \377 / \xff.
		i, err := strconv.ParseUint(ss, base, 8)
		if err != nil {
			return "", "", err
		}
		return string([]byte{byte(i)}), s, nil
	case 'u', 'U':
		// \u takes 4 hex digits, \U takes 8; the digits are decoded
		// pairwise into raw bytes (not into a UTF-8 encoded rune).
		n := 4
		if r == 'U' {
			n = 8
		}
		if len(s) < n {
			return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
		}

		bs := make([]byte, n/2)
		for i := 0; i < n; i += 2 {
			a, ok1 := unhex(s[i])
			b, ok2 := unhex(s[i+1])
			if !ok1 || !ok2 {
				return "", "", errBadHex
			}
			bs[i/2] = a<<4 | b
		}
		s = s[n:]
		return string(bs), s, nil
	}
	return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
|||
|
|||
// unhex converts one hexadecimal digit to its numeric value; ok is false
// if b is not a hex digit. Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
	switch {
	case b >= '0' && b <= '9':
		return b - '0', true
	case b >= 'a' && b <= 'f':
		return b - 'a' + 10, true
	case b >= 'A' && b <= 'F':
		return b - 'A' + 10, true
	default:
		return 0, false
	}
}
|||
|
|||
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op: the following next() call returns
// p.cur again instead of reading new input.
func (p *textParser) back() { p.backed = true }
|||
|
|||
// Advances the parser and returns the new current token.
// If back() was called, the previous token is returned without
// re-lexing. Adjacent quoted strings are concatenated into one token,
// C-style.
func (p *textParser) next() *token {
	if p.backed || p.done {
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done {
		p.cur.value = ""
	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
		// Look for multiple quoted strings separated by whitespace,
		// and concatenate them.
		cat := p.cur
		for {
			p.skipWhitespace()
			if p.done || !isQuote(p.s[0]) {
				break
			}
			p.advance()
			if p.cur.err != nil {
				return &p.cur
			}
			// The raw values are joined with a space; the unquoted
			// forms are joined directly.
			cat.value += " " + p.cur.value
			cat.unquoted += p.cur.unquoted
		}
		p.done = false // parser may have seen EOF, but we want to return cat
		p.cur = cat
	}
	return &p.cur
}
|||
|
|||
func (p *textParser) consumeToken(s string) error { |
|||
tok := p.next() |
|||
if tok.err != nil { |
|||
return tok.err |
|||
} |
|||
if tok.value != s { |
|||
p.back() |
|||
return p.errorf("expected %q, found %q", s, tok.value) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Return a RequiredNotSetError indicating which required field was not set.
// It scans sv for the first nil field whose properties mark it required.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
	st := sv.Type()
	sprops := GetProperties(st)
	for i := 0; i < st.NumField(); i++ {
		if !isNil(sv.Field(i)) {
			continue
		}

		props := sprops.Prop[i]
		if props.Required {
			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
		}
	}
	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
|||
|
|||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
|||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { |
|||
i, ok := sprops.decoderOrigNames[name] |
|||
if ok { |
|||
return i, sprops.Prop[i], true |
|||
} |
|||
return -1, nil, false |
|||
} |
|||
|
|||
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
// A colon is optional before groups and messages; the "bytes" wire-type
// logic below distinguishes messages from strings/repeated fields by the
// Go type they map to.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		// Colon is optional when the field is a group or message.
		needColon := true
		switch props.Wire {
		case "group":
			needColon = false
		case "bytes":
			// A "bytes" field is either a message, a string, or a repeated field;
			// those three become *T, *string and []T respectively, so we can check for
			// this field being a pointer to a non-string.
			if typ.Kind() == reflect.Ptr {
				// *T or *string
				if typ.Elem().Kind() == reflect.String {
					break
				}
			} else if typ.Kind() == reflect.Slice {
				// []T or []*T
				if typ.Elem().Kind() != reflect.Ptr {
					break
				}
			} else if typ.Kind() == reflect.String {
				// The proto3 exception is for a string field,
				// which requires a colon.
				break
			}
			needColon = false
		}
		if needColon {
			return p.errorf("expected ':', found %q", tok.value)
		}
		// The token we read was not a colon; push it back for the caller.
		p.back()
	}
	return nil
}
|||
|
|||
// readStruct parses a sequence of "name: value" entries into the message
// struct sv, stopping at the given terminator token ('>' or '}') or end
// of input. It handles extensions, expanded Any messages, oneofs, map
// fields, and required-field accounting; a RequiredNotSetError is
// deferred and returned only if no harder error occurs first.
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
	st := sv.Type()
	sprops := GetProperties(st)
	reqCount := sprops.reqCount
	var reqFieldErr error
	fieldSet := make(map[string]bool)
	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input. A name may also be
	// "[extension]" or "[type/url]".
	//
	// The whole struct can also be an expanded Any message, like:
	// [type/url] < ... struct contents ... >
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Looks like an extension or an Any.
			//
			// TODO: Check whether we need to handle
			// namespace rooted names (e.g. ".something.Foo").
			extName, err := p.consumeExtName()
			if err != nil {
				return err
			}

			if s := strings.LastIndex(extName, "/"); s >= 0 {
				// If it contains a slash, it's an Any type URL.
				messageName := extName[s+1:]
				mt := MessageType(messageName)
				if mt == nil {
					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
				}
				tok = p.next()
				if tok.err != nil {
					return tok.err
				}
				// consume an optional colon
				if tok.value == ":" {
					tok = p.next()
					if tok.err != nil {
						return tok.err
					}
				}
				var terminator string
				switch tok.value {
				case "<":
					terminator = ">"
				case "{":
					terminator = "}"
				default:
					return p.errorf("expected '{' or '<', found %q", tok.value)
				}
				// Parse the embedded message, then re-marshal it to
				// bytes for the Any's Value field.
				v := reflect.New(mt.Elem())
				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
					return pe
				}
				b, err := Marshal(v.Interface().(Message))
				if err != nil {
					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
				}
				if fieldSet["type_url"] {
					return p.errorf(anyRepeatedlyUnpacked, "type_url")
				}
				if fieldSet["value"] {
					return p.errorf(anyRepeatedlyUnpacked, "value")
				}
				sv.FieldByName("TypeUrl").SetString(extName)
				sv.FieldByName("Value").SetBytes(b)
				fieldSet["type_url"] = true
				fieldSet["value"] = true
				continue
			}

			var desc *ExtensionDesc
			// This could be faster, but it's functional.
			// TODO: Do something smarter than a linear scan.
			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
				if d.Name == extName {
					desc = d
					break
				}
			}
			if desc == nil {
				return p.errorf("unrecognized extension %q", extName)
			}

			props := &Properties{}
			props.Parse(desc.Tag)

			typ := reflect.TypeOf(desc.ExtensionType)
			if err := p.checkForColon(props, typ); err != nil {
				return err
			}

			rep := desc.repeated()

			// Read the extension structure, and set it in
			// the value we're constructing.
			var ext reflect.Value
			if !rep {
				ext = reflect.New(typ).Elem()
			} else {
				ext = reflect.New(typ.Elem()).Elem()
			}
			if err := p.readAny(ext, props); err != nil {
				if _, ok := err.(*RequiredNotSetError); !ok {
					return err
				}
				// Required-not-set is deferred, not fatal here.
				reqFieldErr = err
			}
			ep := sv.Addr().Interface().(Message)
			if !rep {
				SetExtension(ep, desc, ext.Interface())
			} else {
				// Append to the existing slice for this extension,
				// creating one if this is the first occurrence.
				old, err := GetExtension(ep, desc)
				var sl reflect.Value
				if err == nil {
					sl = reflect.ValueOf(old) // existing slice
				} else {
					sl = reflect.MakeSlice(typ, 0, 1)
				}
				sl = reflect.Append(sl, ext)
				SetExtension(ep, desc, sl.Interface())
			}
			if err := p.consumeOptionalSeparator(); err != nil {
				return err
			}
			continue
		}

		// This is a normal, non-extension field.
		name := tok.value
		var dst reflect.Value
		fi, props, ok := structFieldByName(sprops, name)
		if ok {
			dst = sv.Field(fi)
		} else if oop, ok := sprops.OneofTypes[name]; ok {
			// It is a oneof. Allocate the wrapper struct and parse
			// into its single field.
			props = oop.Prop
			nv := reflect.New(oop.Type.Elem())
			dst = nv.Elem().Field(0)
			sv.Field(oop.Field).Set(nv)
		}
		if !dst.IsValid() {
			return p.errorf("unknown field name %q in %v", name, st)
		}

		if dst.Kind() == reflect.Map {
			// Consume any colon.
			if err := p.checkForColon(props, dst.Type()); err != nil {
				return err
			}

			// Construct the map if it doesn't already exist.
			if dst.IsNil() {
				dst.Set(reflect.MakeMap(dst.Type()))
			}
			key := reflect.New(dst.Type().Key()).Elem()
			val := reflect.New(dst.Type().Elem()).Elem()

			// The map entry should be this sequence of tokens:
			//	< key : KEY value : VALUE >
			// However, implementations may omit key or value, and technically
			// we should support them in any order. See b/28924776 for a time
			// this went wrong.

			tok := p.next()
			var terminator string
			switch tok.value {
			case "<":
				terminator = ">"
			case "{":
				terminator = "}"
			default:
				return p.errorf("expected '{' or '<', found %q", tok.value)
			}
			for {
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == terminator {
					break
				}
				switch tok.value {
				case "key":
					if err := p.consumeToken(":"); err != nil {
						return err
					}
					if err := p.readAny(key, props.mkeyprop); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				case "value":
					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
						return err
					}
					if err := p.readAny(val, props.mvalprop); err != nil {
						return err
					}
					if err := p.consumeOptionalSeparator(); err != nil {
						return err
					}
				default:
					p.back()
					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
				}
			}

			dst.SetMapIndex(key, val)
			continue
		}

		// Check that it's not already set if it's not a repeated field.
		if !props.Repeated && fieldSet[name] {
			return p.errorf("non-repeated field %q was repeated", name)
		}

		if err := p.checkForColon(props, dst.Type()); err != nil {
			return err
		}

		// Parse into the field.
		fieldSet[name] = true
		if err := p.readAny(dst, props); err != nil {
			if _, ok := err.(*RequiredNotSetError); !ok {
				return err
			}
			reqFieldErr = err
		}
		if props.Required {
			reqCount--
		}

		if err := p.consumeOptionalSeparator(); err != nil {
			return err
		}

	}

	if reqCount > 0 {
		return p.missingRequiredFieldError(sv)
	}
	return reqFieldErr
}
|||
|
|||
// consumeExtName consumes extension name or expanded Any type URL and the
|
|||
// following ']'. It returns the name or URL consumed.
|
|||
func (p *textParser) consumeExtName() (string, error) { |
|||
tok := p.next() |
|||
if tok.err != nil { |
|||
return "", tok.err |
|||
} |
|||
|
|||
// If extension name or type url is quoted, it's a single token.
|
|||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { |
|||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) |
|||
if err != nil { |
|||
return "", err |
|||
} |
|||
return name, p.consumeToken("]") |
|||
} |
|||
|
|||
// Consume everything up to "]"
|
|||
var parts []string |
|||
for tok.value != "]" { |
|||
parts = append(parts, tok.value) |
|||
tok = p.next() |
|||
if tok.err != nil { |
|||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) |
|||
} |
|||
} |
|||
return strings.Join(parts, ""), nil |
|||
} |
|||
|
|||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|||
// It is used in readStruct to provide backward compatibility.
|
|||
func (p *textParser) consumeOptionalSeparator() error { |
|||
tok := p.next() |
|||
if tok.err != nil { |
|||
return tok.err |
|||
} |
|||
if tok.value != ";" && tok.value != "," { |
|||
p.back() |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// readAny parses a single text-format value from the token stream into v.
// props carries the field's parsed tag properties (notably Enum, used to
// resolve symbolic enum names for int32 fields). For a repeated field v is
// the slice itself: readAny appends a zero element (or, for "[a,b,c]" list
// notation, several) and recurses to fill it. Returns an error on EOF or
// when the token cannot be interpreted as v's kind.
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value == "" {
		return p.errorf("unexpected EOF")
	}

	switch fv := v; fv.Kind() {
	case reflect.Slice:
		at := v.Type()
		if at.Elem().Kind() == reflect.Uint8 {
			// Special case for []byte
			if tok.value[0] != '"' && tok.value[0] != '\'' {
				// Deliberately written out here, as the error after
				// this switch statement would write "invalid []byte: ...",
				// which is not as user-friendly.
				return p.errorf("invalid string: %v", tok.value)
			}
			bytes := []byte(tok.unquoted)
			fv.Set(reflect.ValueOf(bytes))
			return nil
		}
		// Repeated field.
		if tok.value == "[" {
			// Repeated field with list notation, like [1,2,3].
			for {
				// Grow the slice by one zero element, then parse into it.
				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
				err := p.readAny(fv.Index(fv.Len()-1), props)
				if err != nil {
					return err
				}
				tok := p.next()
				if tok.err != nil {
					return tok.err
				}
				if tok.value == "]" {
					break
				}
				if tok.value != "," {
					return p.errorf("Expected ']' or ',' found %q", tok.value)
				}
			}
			return nil
		}
		// One value of the repeated field.
		p.back()
		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
		return p.readAny(fv.Index(fv.Len()-1), props)
	case reflect.Bool:
		// true/1/t/True or false/f/0/False.
		switch tok.value {
		case "true", "1", "t", "True":
			fv.SetBool(true)
			return nil
		case "false", "0", "f", "False":
			fv.SetBool(false)
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v := tok.value
		// Ignore 'f' for compatibility with output generated by C++, but don't
		// remove 'f' when the value is "-inf" or "inf".
		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
			v = v[:len(v)-1]
		}
		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
			fv.SetFloat(f)
			return nil
		}
	case reflect.Int32:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			fv.SetInt(x)
			return nil
		}

		// Not a number: try resolving it as a symbolic enum value name.
		if len(props.Enum) == 0 {
			break
		}
		m, ok := enumValueMaps[props.Enum]
		if !ok {
			break
		}
		x, ok := m[tok.value]
		if !ok {
			break
		}
		fv.SetInt(int64(x))
		return nil
	case reflect.Int64:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			fv.SetInt(x)
			return nil
		}

	case reflect.Ptr:
		// A basic field (indirected through pointer), or a repeated message/group
		p.back()
		fv.Set(reflect.New(fv.Type().Elem()))
		return p.readAny(fv.Elem(), props)
	case reflect.String:
		if tok.value[0] == '"' || tok.value[0] == '\'' {
			fv.SetString(tok.unquoted)
			return nil
		}
	case reflect.Struct:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}
		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			fv.SetUint(x)
			return nil
		}
	}
	// Falling out of the switch means the token didn't match the field kind.
	return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
|||
|
|||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
|||
// before starting to unmarshal, so any existing data in pb is always removed.
|
|||
// If a required field is not set and no other error occurs,
|
|||
// UnmarshalText returns *RequiredNotSetError.
|
|||
func UnmarshalText(s string, pb Message) error { |
|||
if um, ok := pb.(encoding.TextUnmarshaler); ok { |
|||
err := um.UnmarshalText([]byte(s)) |
|||
return err |
|||
} |
|||
pb.Reset() |
|||
v := reflect.ValueOf(pb) |
|||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { |
|||
return pe |
|||
} |
|||
return nil |
|||
} |
|||
@ -0,0 +1,662 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"math" |
|||
"reflect" |
|||
"testing" |
|||
|
|||
. "github.com/golang/protobuf/proto" |
|||
proto3pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
. "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
// UnmarshalTextTest is one table entry for TestUnmarshalText: a text-format
// input paired with either the expected parsed message or the exact error.
type UnmarshalTextTest struct {
	in  string // text-format input passed to UnmarshalText
	err string // if "", no error expected
	out *MyMessage // expected result; also checked on a *RequiredNotSetError when non-nil
}
|||
|
|||
func buildExtStructTest(text string) UnmarshalTextTest { |
|||
msg := &MyMessage{ |
|||
Count: Int32(42), |
|||
} |
|||
SetExtension(msg, E_Ext_More, &Ext{ |
|||
Data: String("Hello, world!"), |
|||
}) |
|||
return UnmarshalTextTest{in: text, out: msg} |
|||
} |
|||
|
|||
func buildExtDataTest(text string) UnmarshalTextTest { |
|||
msg := &MyMessage{ |
|||
Count: Int32(42), |
|||
} |
|||
SetExtension(msg, E_Ext_Text, String("Hello, world!")) |
|||
SetExtension(msg, E_Ext_Number, Int32(1729)) |
|||
return UnmarshalTextTest{in: text, out: msg} |
|||
} |
|||
|
|||
func buildExtRepStringTest(text string) UnmarshalTextTest { |
|||
msg := &MyMessage{ |
|||
Count: Int32(42), |
|||
} |
|||
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { |
|||
panic(err) |
|||
} |
|||
return UnmarshalTextTest{in: text, out: msg} |
|||
} |
|||
|
|||
// unMarshalTextTests drives TestUnmarshalText. Each case supplies a
// text-format input plus either the expected parsed message (out) or the
// exact error string (err). When the error is a *RequiredNotSetError,
// out may also be set to check the partially populated message.
var unMarshalTextTests = []UnmarshalTextTest{
	// Basic
	{
		in: " count:42\n name:\"Dave\" ",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("Dave"),
		},
	},

	// Empty quoted string
	{
		in: `count:42 name:""`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String(""),
		},
	},

	// Quoted string concatenation with double quotes
	{
		in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("My name is elsewhere"),
		},
	},

	// Quoted string concatenation with single quotes
	{
		in: "count:42 name: 'My name is '\n'elsewhere'",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("My name is elsewhere"),
		},
	},

	// Quoted string concatenations with mixed quotes
	{
		in: "count:42 name: 'My name is '\n\"elsewhere\"",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("My name is elsewhere"),
		},
	},
	{
		in: "count:42 name: \"My name is \"\n'elsewhere'",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("My name is elsewhere"),
		},
	},

	// Quoted string with escaped apostrophe
	{
		in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("HOLIDAY - New Year's Day"),
		},
	},

	// Quoted string with single quote
	{
		in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String(`Roger "The Ramster" Ramjet`),
		},
	},

	// Quoted string with all the accepted special characters from the C++ test
	{
		in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\"",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and  multiple   spaces"),
		},
	},

	// Quoted string with quoted backslash
	{
		in: `count:42 name: "\\'xyz"`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String(`\'xyz`),
		},
	},

	// Quoted string with UTF-8 bytes.
	{
		in: "count:42 name: '\303\277\302\201\xAB'",
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("\303\277\302\201\xAB"),
		},
	},

	// Bad quoted string
	{
		in:  `inner: < host: "\0" >` + "\n",
		err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
	},

	// Number too large for int64
	{
		in:  "count: 1 others { key: 123456789012345678901 }",
		err: "line 1.23: invalid int64: 123456789012345678901",
	},

	// Number too large for int32
	{
		in:  "count: 1234567890123",
		err: "line 1.7: invalid int32: 1234567890123",
	},

	// Number in hexadecimal
	{
		in: "count: 0x2beef",
		out: &MyMessage{
			Count: Int32(0x2beef),
		},
	},

	// Number in octal
	{
		in: "count: 024601",
		out: &MyMessage{
			Count: Int32(024601),
		},
	},

	// Floating point number with "f" suffix
	{
		in: "count: 4 others:< weight: 17.0f >",
		out: &MyMessage{
			Count: Int32(4),
			Others: []*OtherMessage{
				{
					Weight: Float32(17),
				},
			},
		},
	},

	// Floating point positive infinity
	{
		in: "count: 4 bigfloat: inf",
		out: &MyMessage{
			Count:    Int32(4),
			Bigfloat: Float64(math.Inf(1)),
		},
	},

	// Floating point negative infinity
	{
		in: "count: 4 bigfloat: -inf",
		out: &MyMessage{
			Count:    Int32(4),
			Bigfloat: Float64(math.Inf(-1)),
		},
	},

	// Number too large for float32
	{
		in:  "others:< weight: 12345678901234567890123456789012345678901234567890 >",
		err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
	},

	// Number posing as a quoted string
	{
		in:  `inner: < host: 12 >` + "\n",
		err: `line 1.15: invalid string: 12`,
	},

	// Quoted string posing as int32
	{
		in:  `count: "12"`,
		err: `line 1.7: invalid int32: "12"`,
	},

	// Quoted string posing a float32
	{
		in:  `others:< weight: "17.4" >`,
		err: `line 1.17: invalid float32: "17.4"`,
	},

	// Enum
	{
		in: `count:42 bikeshed: BLUE`,
		out: &MyMessage{
			Count:    Int32(42),
			Bikeshed: MyMessage_BLUE.Enum(),
		},
	},

	// Repeated field
	{
		in: `count:42 pet: "horsey" pet:"bunny"`,
		out: &MyMessage{
			Count: Int32(42),
			Pet:   []string{"horsey", "bunny"},
		},
	},

	// Repeated field with list notation
	{
		in: `count:42 pet: ["horsey", "bunny"]`,
		out: &MyMessage{
			Count: Int32(42),
			Pet:   []string{"horsey", "bunny"},
		},
	},

	// Repeated message with/without colon and <>/{}
	{
		in: `count:42 others:{} others{} others:<> others:{}`,
		out: &MyMessage{
			Count: Int32(42),
			Others: []*OtherMessage{
				{},
				{},
				{},
				{},
			},
		},
	},

	// Missing colon for inner message
	{
		in: `count:42 inner < host: "cauchy.syd" >`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host: String("cauchy.syd"),
			},
		},
	},

	// Missing colon for string field
	{
		in:  `name "Dave"`,
		err: `line 1.5: expected ':', found "\"Dave\""`,
	},

	// Missing colon for int32 field
	{
		in:  `count 42`,
		err: `line 1.6: expected ':', found "42"`,
	},

	// Missing required field
	{
		in:  `name: "Pawel"`,
		err: `proto: required field "testdata.MyMessage.count" not set`,
		out: &MyMessage{
			Name: String("Pawel"),
		},
	},

	// Missing required field in a required submessage
	{
		in:  `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
		err: `proto: required field "testdata.InnerMessage.host" not set`,
		out: &MyMessage{
			Count:          Int32(42),
			WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
		},
	},

	// Repeated non-repeated field
	{
		in:  `name: "Rob" name: "Russ"`,
		err: `line 1.12: non-repeated field "name" was repeated`,
	},

	// Group
	{
		in: `count: 17 SomeGroup { group_field: 12 }`,
		out: &MyMessage{
			Count: Int32(17),
			Somegroup: &MyMessage_SomeGroup{
				GroupField: Int32(12),
			},
		},
	},

	// Semicolon between fields
	{
		in: `count:3;name:"Calvin"`,
		out: &MyMessage{
			Count: Int32(3),
			Name:  String("Calvin"),
		},
	},
	// Comma between fields
	{
		in: `count:4,name:"Ezekiel"`,
		out: &MyMessage{
			Count: Int32(4),
			Name:  String("Ezekiel"),
		},
	},

	// Boolean false
	{
		in: `count:42 inner { host: "example.com" connected: false }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean true
	{
		in: `count:42 inner { host: "example.com" connected: true }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},
	// Boolean 0
	{
		in: `count:42 inner { host: "example.com" connected: 0 }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean 1
	{
		in: `count:42 inner { host: "example.com" connected: 1 }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},
	// Boolean f
	{
		in: `count:42 inner { host: "example.com" connected: f }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean t
	{
		in: `count:42 inner { host: "example.com" connected: t }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},
	// Boolean False
	{
		in: `count:42 inner { host: "example.com" connected: False }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(false),
			},
		},
	},
	// Boolean True
	{
		in: `count:42 inner { host: "example.com" connected: True }`,
		out: &MyMessage{
			Count: Int32(42),
			Inner: &InnerMessage{
				Host:      String("example.com"),
				Connected: Bool(true),
			},
		},
	},

	// Extension
	buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
	buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
	buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
	buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),

	// Big all-in-one
	{
		in: "count:42 # Meaning\n" +
			`name:"Dave" ` +
			`quote:"\"I didn't want to go.\"" ` +
			`pet:"bunny" ` +
			`pet:"kitty" ` +
			`pet:"horsey" ` +
			`inner:<` +
			`  host:"footrest.syd" ` +
			`  port:7001 ` +
			`  connected:true ` +
			`> ` +
			`others:<` +
			`  key:3735928559 ` +
			`  value:"\x01A\a\f" ` +
			`> ` +
			`others:<` +
			"  weight:58.9  # Atomic weight of Co\n" +
			`  inner:<` +
			`    host:"lesha.mtv" ` +
			`    port:8002 ` +
			`  >` +
			`>`,
		out: &MyMessage{
			Count: Int32(42),
			Name:  String("Dave"),
			Quote: String(`"I didn't want to go."`),
			Pet:   []string{"bunny", "kitty", "horsey"},
			Inner: &InnerMessage{
				Host:      String("footrest.syd"),
				Port:      Int32(7001),
				Connected: Bool(true),
			},
			Others: []*OtherMessage{
				{
					Key:   Int64(3735928559),
					Value: []byte{0x1, 'A', '\a', '\f'},
				},
				{
					Weight: Float32(58.9),
					Inner: &InnerMessage{
						Host: String("lesha.mtv"),
						Port: Int32(8002),
					},
				},
			},
		},
	},
}
|||
|
|||
func TestUnmarshalText(t *testing.T) { |
|||
for i, test := range unMarshalTextTests { |
|||
pb := new(MyMessage) |
|||
err := UnmarshalText(test.in, pb) |
|||
if test.err == "" { |
|||
// We don't expect failure.
|
|||
if err != nil { |
|||
t.Errorf("Test %d: Unexpected error: %v", i, err) |
|||
} else if !reflect.DeepEqual(pb, test.out) { |
|||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", |
|||
i, pb, test.out) |
|||
} |
|||
} else { |
|||
// We do expect failure.
|
|||
if err == nil { |
|||
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) |
|||
} else if err.Error() != test.err { |
|||
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", |
|||
i, err.Error(), test.err) |
|||
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { |
|||
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", |
|||
i, pb, test.out) |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestUnmarshalTextCustomMessage(t *testing.T) { |
|||
msg := &textMessage{} |
|||
if err := UnmarshalText("custom", msg); err != nil { |
|||
t.Errorf("Unexpected error from custom unmarshal: %v", err) |
|||
} |
|||
if UnmarshalText("not custom", msg) == nil { |
|||
t.Errorf("Didn't get expected error from custom unmarshal") |
|||
} |
|||
} |
|||
|
|||
// Regression test; this caused a panic.
|
|||
func TestRepeatedEnum(t *testing.T) { |
|||
pb := new(RepeatedEnum) |
|||
if err := UnmarshalText("color: RED", pb); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
exp := &RepeatedEnum{ |
|||
Color: []RepeatedEnum_Color{RepeatedEnum_RED}, |
|||
} |
|||
if !Equal(pb, exp) { |
|||
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) |
|||
} |
|||
} |
|||
|
|||
func TestProto3TextParsing(t *testing.T) { |
|||
m := new(proto3pb.Message) |
|||
const in = `name: "Wallace" true_scotsman: true` |
|||
want := &proto3pb.Message{ |
|||
Name: "Wallace", |
|||
TrueScotsman: true, |
|||
} |
|||
if err := UnmarshalText(in, m); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if !Equal(m, want) { |
|||
t.Errorf("\n got %v\nwant %v", m, want) |
|||
} |
|||
} |
|||
|
|||
// TestMapParsing checks text-format parsing of map fields, including
// optional separator commas, omitted colons before sub-messages, and
// omitted keys/values (which take the type's zero value).
func TestMapParsing(t *testing.T) {
	m := new(MessageWithMap)
	const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
		`msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
		`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
		`msg_mapping:<value:<f: 5.0>>` + // omitted key
		`msg_mapping:<key:1>` + // omitted value
		`byte_mapping:<key:true value:"so be it">` +
		`byte_mapping:<>` // omitted key and value
	want := &MessageWithMap{
		NameMapping: map[int32]string{
			1:    "Beatles",
			1234: "Feist",
		},
		MsgMapping: map[int64]*FloatingPoint{
			-4: {F: Float64(2.0)},
			-2: {F: Float64(4.0)},
			0:  {F: Float64(5.0)}, // omitted key defaults to 0
			1:  nil,               // omitted value defaults to nil
		},
		ByteMapping: map[bool][]byte{
			false: nil,
			true:  []byte("so be it"),
		},
	}
	if err := UnmarshalText(in, m); err != nil {
		t.Fatal(err)
	}
	if !Equal(m, want) {
		t.Errorf("\n got %v\nwant %v", m, want)
	}
}
|||
|
|||
func TestOneofParsing(t *testing.T) { |
|||
const in = `name:"Shrek"` |
|||
m := new(Communique) |
|||
want := &Communique{Union: &Communique_Name{"Shrek"}} |
|||
if err := UnmarshalText(in, m); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if !Equal(m, want) { |
|||
t.Errorf("\n got %v\nwant %v", m, want) |
|||
} |
|||
} |
|||
|
|||
var benchInput string |
|||
|
|||
func init() { |
|||
benchInput = "count: 4\n" |
|||
for i := 0; i < 1000; i++ { |
|||
benchInput += "pet: \"fido\"\n" |
|||
} |
|||
|
|||
// Check it is valid input.
|
|||
pb := new(MyMessage) |
|||
err := UnmarshalText(benchInput, pb) |
|||
if err != nil { |
|||
panic("Bad benchmark input: " + err.Error()) |
|||
} |
|||
} |
|||
|
|||
// BenchmarkUnmarshalText measures text-format parsing throughput over
// benchInput. The parse error is deliberately ignored: init has already
// verified benchInput is valid.
func BenchmarkUnmarshalText(b *testing.B) {
	pb := new(MyMessage)
	for i := 0; i < b.N; i++ {
		UnmarshalText(benchInput, pb)
	}
	b.SetBytes(int64(len(benchInput)))
}
|||
@ -0,0 +1,474 @@ |
|||
// Go support for Protocol Buffers - Google's data interchange format
|
|||
//
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
|||
// https://github.com/golang/protobuf
|
|||
//
|
|||
// Redistribution and use in source and binary forms, with or without
|
|||
// modification, are permitted provided that the following conditions are
|
|||
// met:
|
|||
//
|
|||
// * Redistributions of source code must retain the above copyright
|
|||
// notice, this list of conditions and the following disclaimer.
|
|||
// * Redistributions in binary form must reproduce the above
|
|||
// copyright notice, this list of conditions and the following disclaimer
|
|||
// in the documentation and/or other materials provided with the
|
|||
// distribution.
|
|||
// * Neither the name of Google Inc. nor the names of its
|
|||
// contributors may be used to endorse or promote products derived from
|
|||
// this software without specific prior written permission.
|
|||
//
|
|||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
|||
package proto_test |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"io/ioutil" |
|||
"math" |
|||
"strings" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
proto3pb "github.com/golang/protobuf/proto/proto3_proto" |
|||
pb "github.com/golang/protobuf/proto/testdata" |
|||
) |
|||
|
|||
// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}

// MarshalText always renders the fixed token "custom".
func (*textMessage) MarshalText() ([]byte, error) {
	return []byte("custom"), nil
}

// UnmarshalText accepts only the exact token "custom".
func (*textMessage) UnmarshalText(bytes []byte) error {
	if got := string(bytes); got != "custom" {
		return errors.New("expected 'custom'")
	}
	return nil
}

// Stub proto.Message methods.
func (*textMessage) Reset()         {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage()  {}
|||
|
|||
// newTestMessage builds the canonical message used by the text-marshaling
// tests: scalar, repeated, nested, group and enum fields populated, a raw
// unknown field, a registered message extension, a repeated string
// extension, and two unknown extensions injected as raw encoded bytes.
func newTestMessage() *pb.MyMessage {
	msg := &pb.MyMessage{
		Count: proto.Int32(42),
		Name:  proto.String("Dave"),
		Quote: proto.String(`"I didn't want to go."`),
		Pet:   []string{"bunny", "kitty", "horsey"},
		Inner: &pb.InnerMessage{
			Host:      proto.String("footrest.syd"),
			Port:      proto.Int32(7001),
			Connected: proto.Bool(true),
		},
		Others: []*pb.OtherMessage{
			{
				Key:   proto.Int64(0xdeadbeef),
				Value: []byte{1, 65, 7, 12},
			},
			{
				Weight: proto.Float32(6.022),
				Inner: &pb.InnerMessage{
					Host: proto.String("lesha.mtv"),
					Port: proto.Int32(8002),
				},
			},
		},
		Bikeshed: pb.MyMessage_BLUE.Enum(),
		Somegroup: &pb.MyMessage_SomeGroup{
			GroupField: proto.Int32(8),
		},
		// One normally wouldn't do this.
		// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
		XXX_unrecognized: []byte{13<<3 | 0, 4},
	}
	ext := &pb.Ext{
		Data: proto.String("Big gobs for big rats"),
	}
	if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
		panic(err)
	}
	greetings := []string{"adg", "easy", "cow"}
	if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
		panic(err)
	}

	// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
	b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
	if err != nil {
		panic(err)
	}
	b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
	proto.SetRawExtension(msg, 201, b)

	// Extensions can be plain fields, too, so let's test that.
	b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
	proto.SetRawExtension(msg, 202, b)

	return msg
}
|||
|
|||
// text is the golden MarshalText rendering of newTestMessage; TestMarshalText
// compares against it byte-for-byte, so the raw string must not be reformatted.
const text = `count: 42
name: "Dave"
quote: "\"I didn't want to go.\""
pet: "bunny"
pet: "kitty"
pet: "horsey"
inner: <
  host: "footrest.syd"
  port: 7001
  connected: true
>
others: <
  key: 3735928559
  value: "\001A\007\014"
>
others: <
  weight: 6.022
  inner: <
    host: "lesha.mtv"
    port: 8002
  >
>
bikeshed: BLUE
SomeGroup {
  group_field: 8
}
/* 2 unknown bytes */
13: 4
[testdata.Ext.more]: <
  data: "Big gobs for big rats"
>
[testdata.greeting]: "adg"
[testdata.greeting]: "easy"
[testdata.greeting]: "cow"
/* 13 unknown bytes */
201: "\t3G skiing"
/* 3 unknown bytes */
202: 19
`
|||
|
|||
func TestMarshalText(t *testing.T) { |
|||
buf := new(bytes.Buffer) |
|||
if err := proto.MarshalText(buf, newTestMessage()); err != nil { |
|||
t.Fatalf("proto.MarshalText: %v", err) |
|||
} |
|||
s := buf.String() |
|||
if s != text { |
|||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) |
|||
} |
|||
} |
|||
|
|||
func TestMarshalTextCustomMessage(t *testing.T) { |
|||
buf := new(bytes.Buffer) |
|||
if err := proto.MarshalText(buf, &textMessage{}); err != nil { |
|||
t.Fatalf("proto.MarshalText: %v", err) |
|||
} |
|||
s := buf.String() |
|||
if s != "custom" { |
|||
t.Errorf("Got %q, expected %q", s, "custom") |
|||
} |
|||
} |
|||
func TestMarshalTextNil(t *testing.T) { |
|||
want := "<nil>" |
|||
tests := []proto.Message{nil, (*pb.MyMessage)(nil)} |
|||
for i, test := range tests { |
|||
buf := new(bytes.Buffer) |
|||
if err := proto.MarshalText(buf, test); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if got := buf.String(); got != want { |
|||
t.Errorf("%d: got %q want %q", i, got, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestMarshalTextUnknownEnum checks that an out-of-range enum value falls
// back to its numeric rendering instead of failing.
func TestMarshalTextUnknownEnum(t *testing.T) {
	// The Color enum only specifies values 0-2.
	m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
	got := m.String()
	const want = `bikeshed:3 `
	if got != want {
		t.Errorf("\n got %q\nwant %q", got, want)
	}
}
|||
|
|||
// TestTextOneof checks text marshaling of oneof fields, including a nil
// message inside a oneof wrapper (which must not panic).
func TestTextOneof(t *testing.T) {
	tests := []struct {
		m    proto.Message
		want string
	}{
		// zero message
		{&pb.Communique{}, ``},
		// scalar field
		{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
		// message field
		{&pb.Communique{Union: &pb.Communique_Msg{
			&pb.Strings{StringField: proto.String("why hello!")},
		}}, `msg:<string_field:"why hello!" >`},
		// bad oneof (should not panic)
		{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
	}
	for _, test := range tests {
		got := strings.TrimSpace(test.m.String())
		if got != test.want {
			t.Errorf("\n got %s\nwant %s", got, test.want)
		}
	}
}
|||
|
|||
func BenchmarkMarshalTextBuffered(b *testing.B) { |
|||
buf := new(bytes.Buffer) |
|||
m := newTestMessage() |
|||
for i := 0; i < b.N; i++ { |
|||
buf.Reset() |
|||
proto.MarshalText(buf, m) |
|||
} |
|||
} |
|||
|
|||
func BenchmarkMarshalTextUnbuffered(b *testing.B) { |
|||
w := ioutil.Discard |
|||
m := newTestMessage() |
|||
for i := 0; i < b.N; i++ { |
|||
proto.MarshalText(w, m) |
|||
} |
|||
} |
|||
|
|||
// compact collapses runs of spaces and newlines in src into single
// spaces, strips /* ... */ comments, and suppresses the space that would
// otherwise follow ':', '<', or '{' and the one preceding '{'.
// Roughly: s/[ \n]+/ /g with text-format-specific tweaks.
func compact(src string) string {
	out := make([]byte, len(src))
	n := 0
	pendingSpace, inComment := false, false
	for i := 0; i < len(src); i++ {
		// Enter a comment on "/*"; skip its opener (two bytes).
		if strings.HasPrefix(src[i:], "/*") {
			inComment = true
			i++
			continue
		}
		// Leave a comment on "*/"; skip its closer (two bytes).
		if inComment && strings.HasPrefix(src[i:], "*/") {
			inComment = false
			i++
			continue
		}
		if inComment {
			continue
		}
		ch := src[i]
		if ch == ' ' || ch == '\n' {
			pendingSpace = true
			continue
		}
		// No space after a field separator or open bracket...
		if n > 0 {
			switch out[n-1] {
			case ':', '<', '{':
				pendingSpace = false
			}
		}
		// ...and none immediately before an open brace.
		if ch == '{' {
			pendingSpace = false
		}
		if pendingSpace {
			out[n] = ' '
			n++
			pendingSpace = false
		}
		out[n] = ch
		n++
	}
	// A trailing run of whitespace becomes a single trailing space.
	if pendingSpace {
		out[n] = ' '
		n++
	}
	return string(out[:n])
}
|||
|
|||
var compactText = compact(text) |
|||
|
|||
func TestCompactText(t *testing.T) { |
|||
s := proto.CompactTextString(newTestMessage()) |
|||
if s != compactText { |
|||
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) |
|||
} |
|||
} |
|||
|
|||
func TestStringEscaping(t *testing.T) { |
|||
testCases := []struct { |
|||
in *pb.Strings |
|||
out string |
|||
}{ |
|||
{ |
|||
// Test data from C++ test (TextFormatTest.StringEscape).
|
|||
// Single divergence: we don't escape apostrophes.
|
|||
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, |
|||
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", |
|||
}, |
|||
{ |
|||
// Test data from the same C++ test.
|
|||
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, |
|||
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n", |
|||
}, |
|||
{ |
|||
// Some UTF-8.
|
|||
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, |
|||
`string_field: "\000\001\377\201"` + "\n", |
|||
}, |
|||
} |
|||
|
|||
for i, tc := range testCases { |
|||
var buf bytes.Buffer |
|||
if err := proto.MarshalText(&buf, tc.in); err != nil { |
|||
t.Errorf("proto.MarsalText: %v", err) |
|||
continue |
|||
} |
|||
s := buf.String() |
|||
if s != tc.out { |
|||
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) |
|||
continue |
|||
} |
|||
|
|||
// Check round-trip.
|
|||
pb := new(pb.Strings) |
|||
if err := proto.UnmarshalText(s, pb); err != nil { |
|||
t.Errorf("#%d: UnmarshalText: %v", i, err) |
|||
continue |
|||
} |
|||
if !proto.Equal(pb, tc.in) { |
|||
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// A limitedWriter accepts some output before it fails.
// This is a proxy for something like a nearly-full or imminently-failing disk,
// or a network connection that is about to die.
type limitedWriter struct {
	b     bytes.Buffer
	limit int
}

// outOfSpace is the error a limitedWriter reports once its limit is hit.
var outOfSpace = errors.New("proto: insufficient space")

// Write stores p in the underlying buffer until the limit is reached,
// then reports outOfSpace (possibly after a short write).
func (w *limitedWriter) Write(p []byte) (n int, err error) {
	remaining := w.limit - w.b.Len()
	switch {
	case remaining <= 0:
		return 0, outOfSpace
	case len(p) <= remaining:
		return w.b.Write(p)
	default:
		n, _ = w.b.Write(p[:remaining])
		return n, outOfSpace
	}
}
|||
|
|||
// TestMarshalTextFailing checks that MarshalText propagates writer
// errors, and that whatever was written before the failure is exactly a
// prefix of the full expected text output (i.e. nothing is written out
// of order or duplicated on error).
func TestMarshalTextFailing(t *testing.T) {
	// Try lots of different sizes to exercise more error code-paths.
	for lim := 0; lim < len(text); lim++ {
		buf := new(limitedWriter)
		buf.limit = lim
		err := proto.MarshalText(buf, newTestMessage())
		// We expect a certain error, but also some partial results in the buffer.
		if err != outOfSpace {
			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
		}
		s := buf.b.String()
		// The bytes that made it out must match the expected prefix.
		x := text[:buf.limit]
		if s != x {
			t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
		}
	}
}
|||
|
|||
func TestFloats(t *testing.T) { |
|||
tests := []struct { |
|||
f float64 |
|||
want string |
|||
}{ |
|||
{0, "0"}, |
|||
{4.7, "4.7"}, |
|||
{math.Inf(1), "inf"}, |
|||
{math.Inf(-1), "-inf"}, |
|||
{math.NaN(), "nan"}, |
|||
} |
|||
for _, test := range tests { |
|||
msg := &pb.FloatingPoint{F: &test.f} |
|||
got := strings.TrimSpace(msg.String()) |
|||
want := `f:` + test.want |
|||
if got != want { |
|||
t.Errorf("f=%f: got %q, want %q", test.f, got, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestRepeatedNilText verifies that nil entries inside a repeated
// message field render as "<nil>" rather than panicking, and that the
// non-nil entries around them still print normally.
func TestRepeatedNilText(t *testing.T) {
	m := &pb.MessageList{
		Message: []*pb.MessageList_Message{
			nil,
			&pb.MessageList_Message{
				Name: proto.String("Horse"),
			},
			nil,
		},
	}
	want := `Message <nil>
Message {
  name: "Horse"
}
Message <nil>
`
	if s := proto.MarshalTextString(m); s != want {
		t.Errorf(" got: %s\nwant: %s", s, want)
	}
}
|||
|
|||
// TestProto3Text covers text marshaling of proto3 messages and map
// fields: zero-valued messages produce no output, an empty byte slice is
// treated as unset, maps print like repeated key/value structs sorted by
// key, and a nil map value must not crash.
func TestProto3Text(t *testing.T) {
	tests := []struct {
		m    proto.Message
		want string
	}{
		// zero message
		{&proto3pb.Message{}, ``},
		// zero message except for an empty byte slice
		{&proto3pb.Message{Data: []byte{}}, ``},
		// trivial case
		{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
		// empty map
		{&pb.MessageWithMap{}, ``},
		// non-empty map; map format is the same as a repeated struct,
		// and they are sorted by key (numerically for numeric keys).
		{
			&pb.MessageWithMap{NameMapping: map[int32]string{
				-1:      "Negatory",
				7:       "Lucky",
				1234:    "Feist",
				6345789: "Otis",
			}},
			`name_mapping:<key:-1 value:"Negatory" > ` +
				`name_mapping:<key:7 value:"Lucky" > ` +
				`name_mapping:<key:1234 value:"Feist" > ` +
				`name_mapping:<key:6345789 value:"Otis" >`,
		},
		// map with nil value; not well-defined, but we shouldn't crash
		{
			&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
			`msg_mapping:<key:7 >`,
		},
	}
	for _, test := range tests {
		got := strings.TrimSpace(test.m.String())
		if got != test.want {
			t.Errorf("\n got %s\nwant %s", got, test.want)
		}
	}
}
|||
@ -0,0 +1,7 @@ |
|||
# Default target: intentionally empty (nothing to build).
all:

# cover: run all package tests with coverage and print a per-function
# coverage summary from the collected profile.
cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
|||
@ -0,0 +1,178 @@ |
|||
// Copyright 2013 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package pbutil |
|||
|
|||
import ( |
|||
"bytes" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
. "github.com/matttproud/golang_protobuf_extensions/testdata" |
|||
) |
|||
|
|||
// TestWriteDelimited checks WriteDelimited against known-good encodings:
// the returned byte count and error must match, and the emitted bytes
// (uvarint length prefix followed by the marshaled message) must be
// byte-for-byte identical to the expected buffer. The third case uses a
// >127-byte message so the length prefix spans two varint bytes
// (141, 2 => 269).
func TestWriteDelimited(t *testing.T) {
	t.Parallel()
	for _, test := range []struct {
		msg proto.Message
		buf []byte
		n   int
		err error
	}{
		{
			msg: &Empty{},
			n:   1,
			buf: []byte{0},
		},
		{
			msg: &GoEnum{Foo: FOO_FOO1.Enum()},
			n:   3,
			buf: []byte{2, 8, 1},
		},
		{
			msg: &Strings{
				StringField: proto.String(`This is my gigantic, unhappy string.  It exceeds
the encoding size of a single byte varint.  We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may.  Let's hope you enjoy testing as much as we do.`),
			},
			n: 271,
			buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
				121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
				97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
				116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
				110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
				32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
				118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
				117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
				122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
				101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
				104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
				32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
				105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
				114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
				112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
				116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
				106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
				109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
		},
	} {
		var buf bytes.Buffer
		if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err {
			t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err)
		}
		if out := buf.Bytes(); !bytes.Equal(out, test.buf) {
			t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf)
		}
	}
}
|||
|
|||
// TestReadDelimited is the inverse of TestWriteDelimited: given the same
// known-good byte streams, ReadDelimited must consume exactly the stated
// number of bytes and reconstruct a message equal to the original.
func TestReadDelimited(t *testing.T) {
	t.Parallel()
	for _, test := range []struct {
		buf []byte
		msg proto.Message
		n   int
		err error
	}{
		{
			buf: []byte{0},
			msg: &Empty{},
			n:   1,
		},
		{
			n:   3,
			buf: []byte{2, 8, 1},
			msg: &GoEnum{Foo: FOO_FOO1.Enum()},
		},
		{
			buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
				121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
				97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
				116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
				110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
				32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
				118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
				117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
				122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
				101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
				104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
				32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
				105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
				114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
				112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
				116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
				106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
				109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
			msg: &Strings{
				StringField: proto.String(`This is my gigantic, unhappy string.  It exceeds
the encoding size of a single byte varint.  We are using it to fuzz test the
correctness of the header decoding mechanisms, which may prove problematic.
I expect it may.  Let's hope you enjoy testing as much as we do.`),
			},
			n: 271,
		},
	} {
		// Decode into a reset clone so field presence starts from zero.
		msg := proto.Clone(test.msg)
		msg.Reset()
		if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {
			t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err)
		}
		if !proto.Equal(msg, test.msg) {
			t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg)
		}
	}
}
|||
|
|||
func TestEndToEndValid(t *testing.T) { |
|||
t.Parallel() |
|||
for _, test := range [][]proto.Message{ |
|||
{&Empty{}}, |
|||
{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, |
|||
{&GoEnum{Foo: FOO_FOO1.Enum()}}, |
|||
{&Strings{ |
|||
StringField: proto.String(`This is my gigantic, unhappy string. It exceeds |
|||
the encoding size of a single byte varint. We are using it to fuzz test the |
|||
correctness of the header decoding mechanisms, which may prove problematic. |
|||
I expect it may. Let's hope you enjoy testing as much as we do.`), |
|||
}}, |
|||
} { |
|||
var buf bytes.Buffer |
|||
var written int |
|||
for i, msg := range test { |
|||
n, err := WriteDelimited(&buf, msg) |
|||
if err != nil { |
|||
// Assumption: TestReadDelimited and TestWriteDelimited are sufficient
|
|||
// and inputs for this test are explicitly exercised there.
|
|||
t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) |
|||
} |
|||
written += n |
|||
} |
|||
var read int |
|||
for i, msg := range test { |
|||
out := proto.Clone(msg) |
|||
out.Reset() |
|||
n, _ := ReadDelimited(&buf, out) |
|||
// Decide to do EOF checking?
|
|||
read += n |
|||
if !proto.Equal(out, msg) { |
|||
t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) |
|||
} |
|||
} |
|||
if read != written { |
|||
t.Fatalf("%v read = %d; want %d", test, read, written) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,75 @@ |
|||
// Copyright 2013 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package pbutil |
|||
|
|||
import ( |
|||
"encoding/binary" |
|||
"errors" |
|||
"io" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
) |
|||
|
|||
var errInvalidVarint = errors.New("invalid varint32 encountered") |
|||
|
|||
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
	// Per AbstractParser#parsePartialDelimitedFrom with
	// CodedInputStream#readRawVarint32.
	var headerBuf [binary.MaxVarintLen32]byte
	var bytesRead, varIntBytes int
	var messageLength uint64
	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
		// A varint32 fits in MaxVarintLen32 bytes; needing more means
		// the prefix is malformed.
		if bytesRead >= len(headerBuf) {
			return bytesRead, errInvalidVarint
		}
		// We have to read byte by byte here to avoid reading more bytes
		// than required. Each read byte is appended to what we have
		// read before.
		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
		if newBytesRead == 0 {
			if err != nil {
				return bytesRead, err
			}
			// A Reader should not return (0, nil), but if it does,
			// it should be treated as no-op (according to the
			// Reader contract). So let's go on...
			continue
		}
		bytesRead += newBytesRead
		// Now present everything read so far to the varint decoder and
		// see if a varint can be decoded already.
		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
	}

	// Read exactly the advertised number of body bytes; io.ReadFull
	// reports io.ErrUnexpectedEOF on a truncated body.
	messageBuf := make([]byte, messageLength)
	newBytesRead, err := io.ReadFull(r, messageBuf)
	bytesRead += newBytesRead
	if err != nil {
		return bytesRead, err
	}

	return bytesRead, proto.Unmarshal(messageBuf, m)
}
|||
@ -0,0 +1,99 @@ |
|||
// Copyright 2016 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package pbutil |
|||
|
|||
import ( |
|||
"bytes" |
|||
"io" |
|||
"testing" |
|||
"testing/iotest" |
|||
) |
|||
|
|||
func TestReadDelimitedIllegalVarint(t *testing.T) { |
|||
t.Parallel() |
|||
var tests = []struct { |
|||
in []byte |
|||
n int |
|||
err error |
|||
}{ |
|||
{ |
|||
in: []byte{255, 255, 255, 255, 255}, |
|||
n: 5, |
|||
err: errInvalidVarint, |
|||
}, |
|||
{ |
|||
in: []byte{255, 255, 255, 255, 255, 255}, |
|||
n: 5, |
|||
err: errInvalidVarint, |
|||
}, |
|||
} |
|||
for _, test := range tests { |
|||
n, err := ReadDelimited(bytes.NewReader(test.in), nil) |
|||
if got, want := n, test.n; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want) |
|||
} |
|||
if got, want := err, test.err; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestReadDelimitedPrematureHeader(t *testing.T) { |
|||
t.Parallel() |
|||
var data = []byte{128, 5} // 256 + 256 + 128
|
|||
n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil) |
|||
if got, want := n, 1; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) |
|||
} |
|||
if got, want := err, io.EOF; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) |
|||
} |
|||
} |
|||
|
|||
func TestReadDelimitedPrematureBody(t *testing.T) { |
|||
t.Parallel() |
|||
var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
|
|||
n, err := ReadDelimited(bytes.NewReader(data[:]), nil) |
|||
if got, want := n, 5; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) |
|||
} |
|||
if got, want := err, io.ErrUnexpectedEOF; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) |
|||
} |
|||
} |
|||
|
|||
func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) { |
|||
t.Parallel() |
|||
var data = []byte{128, 5} // 256 + 256 + 128
|
|||
n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil) |
|||
if got, want := n, 1; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) |
|||
} |
|||
if got, want := err, io.EOF; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) |
|||
} |
|||
} |
|||
|
|||
func TestReadDelimitedPrematureBodyIncremental(t *testing.T) { |
|||
t.Parallel() |
|||
var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
|
|||
n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil) |
|||
if got, want := n, 5; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) |
|||
} |
|||
if got, want := err, io.ErrUnexpectedEOF; got != want { |
|||
t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) |
|||
} |
|||
} |
|||
@ -0,0 +1,16 @@ |
|||
// Copyright 2013 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package pbutil provides record length-delimited Protocol Buffer streaming.
|
|||
package pbutil |
|||
@ -0,0 +1,46 @@ |
|||
// Copyright 2013 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package pbutil |
|||
|
|||
import ( |
|||
"encoding/binary" |
|||
"io" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
) |
|||
|
|||
// WriteDelimited encodes and dumps a message to the provided writer prefixed
|
|||
// with a 32-bit varint indicating the length of the encoded message, producing
|
|||
// a length-delimited record stream, which can be used to chain together
|
|||
// encoded messages of the same type together in a file. It returns the total
|
|||
// number of bytes written and any applicable error. This is roughly
|
|||
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
|
|||
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { |
|||
buffer, err := proto.Marshal(m) |
|||
if err != nil { |
|||
return 0, err |
|||
} |
|||
|
|||
var buf [binary.MaxVarintLen32]byte |
|||
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) |
|||
|
|||
sync, err := w.Write(buf[:encodedLength]) |
|||
if err != nil { |
|||
return sync, err |
|||
} |
|||
|
|||
n, err = w.Write(buffer) |
|||
return n + sync, err |
|||
} |
|||
@ -0,0 +1,67 @@ |
|||
// Copyright 2016 Matt T. Proud
|
|||
//
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package pbutil |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"testing" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
) |
|||
|
|||
// errMarshal is the sentinel error returned by cantMarshal.Marshal.
var errMarshal = errors.New("pbutil: can't marshal")

// cantMarshal is a message whose Marshal always fails, used to exercise
// WriteDelimited's marshal-error path. The embedded interface satisfies
// proto.Message without providing a real implementation.
type cantMarshal struct{ proto.Message }

func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal }

// Compile-time check that cantMarshal satisfies proto.Message.
var _ proto.Message = cantMarshal{}
|||
|
|||
func TestWriteDelimitedMarshalErr(t *testing.T) { |
|||
t.Parallel() |
|||
var data cantMarshal |
|||
var buf bytes.Buffer |
|||
n, err := WriteDelimited(&buf, data) |
|||
if got, want := n, 0; got != want { |
|||
t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) |
|||
} |
|||
if got, want := err, errMarshal; got != want { |
|||
t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) |
|||
} |
|||
} |
|||
|
|||
// canMarshal marshals successfully to a fixed six-byte payload; used to
// test write-side failures independently of marshaling.
type canMarshal struct{ proto.Message }

func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil }

// errWrite is the sentinel error returned by cantWrite.Write.
var errWrite = errors.New("pbutil: can't write")

// cantWrite is an io.Writer that always fails without consuming input.
type cantWrite struct{}

func (cantWrite) Write([]byte) (int, error) { return 0, errWrite }
|||
|
|||
func TestWriteDelimitedWriteErr(t *testing.T) { |
|||
t.Parallel() |
|||
var data canMarshal |
|||
var buf cantWrite |
|||
n, err := WriteDelimited(buf, data) |
|||
if got, want := n, 0; got != want { |
|||
t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) |
|||
} |
|||
if got, want := err, errWrite; got != want { |
|||
t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) |
|||
} |
|||
} |
|||
@ -0,0 +1,18 @@ |
|||
The Prometheus project was started by Matt T. Proud (emeritus) and |
|||
Julius Volz in 2012. |
|||
|
|||
Maintainers of this repository: |
|||
|
|||
* Björn Rabenstein <beorn@soundcloud.com> |
|||
|
|||
The following individuals have contributed code to this repository |
|||
(listed in alphabetical order): |
|||
|
|||
* Bernerd Schaefer <bj.schaefer@gmail.com> |
|||
* Björn Rabenstein <beorn@soundcloud.com> |
|||
* Daniel Bornkessel <daniel@soundcloud.com> |
|||
* Jeff Younker <jeff@drinktomi.com> |
|||
* Julius Volz <julius.volz@gmail.com> |
|||
* Matt T. Proud <matt.proud@gmail.com> |
|||
* Tobias Schmidt <ts@soundcloud.com> |
|||
|
|||
@ -0,0 +1,109 @@ |
|||
## 0.8.0 / 2016-08-17 |
|||
* [CHANGE] Registry is doing more consistency checks. This might break |
|||
existing setups that used to export inconsistent metrics. |
|||
* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow |
|||
arbitrary grouping. |
|||
* [CHANGE] Removed `SelfCollector`. |
|||
* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. |
|||
* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, |
|||
`extraction`. |
|||
* [CHANGE] Deprecated a number of functions. |
|||
* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` |
|||
interfaces. |
|||
* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package |
|||
`promhttp`) and enabling the creation of other exposition mechanisms. |
|||
* [FEATURE] `MustRegister` is variadic now, allowing registration of many |
|||
collectors in one call. |
|||
* [FEATURE] Added HTTP API v1 package. |
|||
* [ENHANCEMENT] Numerous documentation improvements. |
|||
* [ENHANCEMENT] Improved metric sorting. |
|||
* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. |
|||
* [ENHANCEMENT] Several test improvements. |
|||
* [BUGFIX] Handle collisions in MetricVec. |
|||
|
|||
## 0.7.0 / 2015-07-27 |
|||
* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. |
|||
* [BUGFIX] Closed gaps in metric consistency check. |
|||
* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. |
|||
* [ENHANCEMENT] Document the possibility to create "empty" metrics in |
|||
a metric vector. |
|||
* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. |
|||
* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. |
|||
* [ENHANCEMENT] Change responseWriterDelegator.written to int64. |
|||
|
|||
## 0.6.0 / 2015-06-01 |
|||
* [CHANGE] Rename process_goroutines to go_goroutines. |
|||
* [ENHANCEMENT] Validate label names during YAML decoding. |
|||
* [ENHANCEMENT] Add LabelName regular expression. |
|||
* [BUGFIX] Ensure alignment of struct members for 32-bit systems. |
|||
|
|||
## 0.5.0 / 2015-05-06 |
|||
* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. |
|||
This makes fingerprinting slower and more allocation-heavy, but the |
|||
weakness was too severe to be tolerated. |
|||
* [CHANGE] As a result of the above, Metric.Fingerprint is now returning |
|||
a different fingerprint. To keep the same fingerprint, the new method |
|||
Metric.FastFingerprint was introduced, which will be used by the |
|||
Prometheus server for storage purposes (implying that a collision |
|||
detection has to be added, too). |
|||
* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on |
|||
fingerprinting anymore, removing the possibility of an undetected |
|||
fingerprint collision. |
|||
* [FEATURE] The Go collector in the exposition library includes garbage |
|||
collection stats. |
|||
* [FEATURE] The exposition library allows to create constant "throw-away" |
|||
summaries and histograms. |
|||
* [CHANGE] A number of new reserved labels and prefixes. |
|||
|
|||
## 0.4.0 / 2015-04-08 |
|||
* [CHANGE] Return NaN when Summaries have no observations yet. |
|||
* [BUGFIX] Properly handle Summary decay upon Write(). |
|||
* [BUGFIX] Fix the documentation link to the consumption library. |
|||
* [FEATURE] Allow the metric family injection hook to merge with existing |
|||
metric families. |
|||
* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. |
|||
* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. |
|||
|
|||
## 0.3.2 / 2015-03-11 |
|||
* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is |
|||
only used by the Prometheus server internally. |
|||
* [CLEANUP] Added licenses of vendored code left out by godep. |
|||
|
|||
## 0.3.1 / 2015-03-04 |
|||
* [ENHANCEMENT] Switched fingerprinting functions from own free list to |
|||
sync.Pool. |
|||
* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). |
|||
|
|||
## 0.3.0 / 2015-03-03 |
|||
* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL |
|||
PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS |
|||
VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. |
|||
* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was |
|||
arguably broken.) |
|||
* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If |
|||
client_golang is used as a library, the vendoring will stay out of your way. |
|||
* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made |
|||
the fingerprinting change above necessary.) |
|||
* [FEATURE] Added new fingerprinting functions SignatureForLabels and |
|||
SignatureWithoutLabels to be used by the Prometheus server. These functions |
|||
require fewer allocations than the ones currently used by the server. |
|||
|
|||
## 0.2.0 / 2015-02-23 |
|||
* [FEATURE] Introduce new Histogram metric type.
|||
* [CHANGE] Ignore process collector errors for now (better error handling |
|||
pending). |
|||
* [CHANGE] Use clear error interface for process pidFn. |
|||
* [BUGFIX] Fix Go download links for several archs and OSes. |
|||
* [ENHANCEMENT] Massively improve Gauge and Counter performance. |
|||
* [ENHANCEMENT] Catch illegal label names for summaries in histograms. |
|||
* [ENHANCEMENT] Reduce allocations during fingerprinting. |
|||
* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if |
|||
both cgo is available and the build is for an OS with procfs. |
|||
* [CLEANUP] Clean up code style issues. |
|||
* [CLEANUP] Mark slow test as such and exclude them from travis. |
|||
* [CLEANUP] Update protobuf library package name. |
|||
* [CLEANUP] Updated vendoring of beorn7/perks. |
|||
|
|||
## 0.1.0 / 2015-02-02 |
|||
* [CLEANUP] Introduced semantic versioning and changelog. From now on, |
|||
changes will be reported in this file. |
|||
@ -0,0 +1,18 @@ |
|||
# Contributing |
|||
|
|||
Prometheus uses GitHub to manage reviews of pull requests. |
|||
|
|||
* If you have a trivial fix or improvement, go ahead and create a pull |
|||
request, addressing (with `@...`) one or more of the maintainers |
|||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. |
|||
|
|||
* If you plan to do something more involved, first discuss your ideas |
|||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). |
|||
This will avoid unnecessary work and surely give you and us a good deal |
|||
of inspiration. |
|||
|
|||
* Relevant coding style guidelines are the [Go Code Review |
|||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) |
|||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best |
|||
Practices for Production |
|||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). |
|||
@ -0,0 +1,201 @@ |
|||
Apache License |
|||
Version 2.0, January 2004 |
|||
http://www.apache.org/licenses/ |
|||
|
|||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
|||
|
|||
1. Definitions. |
|||
|
|||
"License" shall mean the terms and conditions for use, reproduction, |
|||
and distribution as defined by Sections 1 through 9 of this document. |
|||
|
|||
"Licensor" shall mean the copyright owner or entity authorized by |
|||
the copyright owner that is granting the License. |
|||
|
|||
"Legal Entity" shall mean the union of the acting entity and all |
|||
other entities that control, are controlled by, or are under common |
|||
control with that entity. For the purposes of this definition, |
|||
"control" means (i) the power, direct or indirect, to cause the |
|||
direction or management of such entity, whether by contract or |
|||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
|||
outstanding shares, or (iii) beneficial ownership of such entity. |
|||
|
|||
"You" (or "Your") shall mean an individual or Legal Entity |
|||
exercising permissions granted by this License. |
|||
|
|||
"Source" form shall mean the preferred form for making modifications, |
|||
including but not limited to software source code, documentation |
|||
source, and configuration files. |
|||
|
|||
"Object" form shall mean any form resulting from mechanical |
|||
transformation or translation of a Source form, including but |
|||
not limited to compiled object code, generated documentation, |
|||
and conversions to other media types. |
|||
|
|||
"Work" shall mean the work of authorship, whether in Source or |
|||
Object form, made available under the License, as indicated by a |
|||
copyright notice that is included in or attached to the work |
|||
(an example is provided in the Appendix below). |
|||
|
|||
"Derivative Works" shall mean any work, whether in Source or Object |
|||
form, that is based on (or derived from) the Work and for which the |
|||
editorial revisions, annotations, elaborations, or other modifications |
|||
represent, as a whole, an original work of authorship. For the purposes |
|||
of this License, Derivative Works shall not include works that remain |
|||
separable from, or merely link (or bind by name) to the interfaces of, |
|||
the Work and Derivative Works thereof. |
|||
|
|||
"Contribution" shall mean any work of authorship, including |
|||
the original version of the Work and any modifications or additions |
|||
to that Work or Derivative Works thereof, that is intentionally |
|||
submitted to Licensor for inclusion in the Work by the copyright owner |
|||
or by an individual or Legal Entity authorized to submit on behalf of |
|||
the copyright owner. For the purposes of this definition, "submitted" |
|||
means any form of electronic, verbal, or written communication sent |
|||
to the Licensor or its representatives, including but not limited to |
|||
communication on electronic mailing lists, source code control systems, |
|||
and issue tracking systems that are managed by, or on behalf of, the |
|||
Licensor for the purpose of discussing and improving the Work, but |
|||
excluding communication that is conspicuously marked or otherwise |
|||
designated in writing by the copyright owner as "Not a Contribution." |
|||
|
|||
"Contributor" shall mean Licensor and any individual or Legal Entity |
|||
on behalf of whom a Contribution has been received by Licensor and |
|||
subsequently incorporated within the Work. |
|||
|
|||
2. Grant of Copyright License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
copyright license to reproduce, prepare Derivative Works of, |
|||
publicly display, publicly perform, sublicense, and distribute the |
|||
Work and such Derivative Works in Source or Object form. |
|||
|
|||
3. Grant of Patent License. Subject to the terms and conditions of |
|||
this License, each Contributor hereby grants to You a perpetual, |
|||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
|||
(except as stated in this section) patent license to make, have made, |
|||
use, offer to sell, sell, import, and otherwise transfer the Work, |
|||
where such license applies only to those patent claims licensable |
|||
by such Contributor that are necessarily infringed by their |
|||
Contribution(s) alone or by combination of their Contribution(s) |
|||
with the Work to which such Contribution(s) was submitted. If You |
|||
institute patent litigation against any entity (including a |
|||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
|||
or a Contribution incorporated within the Work constitutes direct |
|||
or contributory patent infringement, then any patent licenses |
|||
granted to You under this License for that Work shall terminate |
|||
as of the date such litigation is filed. |
|||
|
|||
4. Redistribution. You may reproduce and distribute copies of the |
|||
Work or Derivative Works thereof in any medium, with or without |
|||
modifications, and in Source or Object form, provided that You |
|||
meet the following conditions: |
|||
|
|||
(a) You must give any other recipients of the Work or |
|||
Derivative Works a copy of this License; and |
|||
|
|||
(b) You must cause any modified files to carry prominent notices |
|||
stating that You changed the files; and |
|||
|
|||
(c) You must retain, in the Source form of any Derivative Works |
|||
that You distribute, all copyright, patent, trademark, and |
|||
attribution notices from the Source form of the Work, |
|||
excluding those notices that do not pertain to any part of |
|||
the Derivative Works; and |
|||
|
|||
(d) If the Work includes a "NOTICE" text file as part of its |
|||
distribution, then any Derivative Works that You distribute must |
|||
include a readable copy of the attribution notices contained |
|||
within such NOTICE file, excluding those notices that do not |
|||
pertain to any part of the Derivative Works, in at least one |
|||
of the following places: within a NOTICE text file distributed |
|||
as part of the Derivative Works; within the Source form or |
|||
documentation, if provided along with the Derivative Works; or, |
|||
within a display generated by the Derivative Works, if and |
|||
wherever such third-party notices normally appear. The contents |
|||
of the NOTICE file are for informational purposes only and |
|||
do not modify the License. You may add Your own attribution |
|||
notices within Derivative Works that You distribute, alongside |
|||
or as an addendum to the NOTICE text from the Work, provided |
|||
that such additional attribution notices cannot be construed |
|||
as modifying the License. |
|||
|
|||
You may add Your own copyright statement to Your modifications and |
|||
may provide additional or different license terms and conditions |
|||
for use, reproduction, or distribution of Your modifications, or |
|||
for any such Derivative Works as a whole, provided Your use, |
|||
reproduction, and distribution of the Work otherwise complies with |
|||
the conditions stated in this License. |
|||
|
|||
5. Submission of Contributions. Unless You explicitly state otherwise, |
|||
any Contribution intentionally submitted for inclusion in the Work |
|||
by You to the Licensor shall be under the terms and conditions of |
|||
this License, without any additional terms or conditions. |
|||
Notwithstanding the above, nothing herein shall supersede or modify |
|||
the terms of any separate license agreement you may have executed |
|||
with Licensor regarding such Contributions. |
|||
|
|||
6. Trademarks. This License does not grant permission to use the trade |
|||
names, trademarks, service marks, or product names of the Licensor, |
|||
except as required for reasonable and customary use in describing the |
|||
origin of the Work and reproducing the content of the NOTICE file. |
|||
|
|||
7. Disclaimer of Warranty. Unless required by applicable law or |
|||
agreed to in writing, Licensor provides the Work (and each |
|||
Contributor provides its Contributions) on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
|||
implied, including, without limitation, any warranties or conditions |
|||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
|||
PARTICULAR PURPOSE. You are solely responsible for determining the |
|||
appropriateness of using or redistributing the Work and assume any |
|||
risks associated with Your exercise of permissions under this License. |
|||
|
|||
8. Limitation of Liability. In no event and under no legal theory, |
|||
whether in tort (including negligence), contract, or otherwise, |
|||
unless required by applicable law (such as deliberate and grossly |
|||
negligent acts) or agreed to in writing, shall any Contributor be |
|||
liable to You for damages, including any direct, indirect, special, |
|||
incidental, or consequential damages of any character arising as a |
|||
result of this License or out of the use or inability to use the |
|||
Work (including but not limited to damages for loss of goodwill, |
|||
work stoppage, computer failure or malfunction, or any and all |
|||
other commercial damages or losses), even if such Contributor |
|||
has been advised of the possibility of such damages. |
|||
|
|||
9. Accepting Warranty or Additional Liability. While redistributing |
|||
the Work or Derivative Works thereof, You may choose to offer, |
|||
and charge a fee for, acceptance of support, warranty, indemnity, |
|||
or other liability obligations and/or rights consistent with this |
|||
License. However, in accepting such obligations, You may act only |
|||
on Your own behalf and on Your sole responsibility, not on behalf |
|||
of any other Contributor, and only if You agree to indemnify, |
|||
defend, and hold each Contributor harmless for any liability |
|||
incurred by, or claims asserted against, such Contributor by reason |
|||
of your accepting any such warranty or additional liability. |
|||
|
|||
END OF TERMS AND CONDITIONS |
|||
|
|||
APPENDIX: How to apply the Apache License to your work. |
|||
|
|||
To apply the Apache License to your work, attach the following |
|||
boilerplate notice, with the fields enclosed by brackets "[]" |
|||
replaced with your own identifying information. (Don't include |
|||
the brackets!) The text should be enclosed in the appropriate |
|||
comment syntax for the file format. We also recommend that a |
|||
file or class name and description of purpose be included on the |
|||
same "printed page" as the copyright notice for easier |
|||
identification within third-party archives. |
|||
|
|||
Copyright [yyyy] [name of copyright owner] |
|||
|
|||
Licensed under the Apache License, Version 2.0 (the "License"); |
|||
you may not use this file except in compliance with the License. |
|||
You may obtain a copy of the License at |
|||
|
|||
http://www.apache.org/licenses/LICENSE-2.0 |
|||
|
|||
Unless required by applicable law or agreed to in writing, software |
|||
distributed under the License is distributed on an "AS IS" BASIS, |
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
See the License for the specific language governing permissions and |
|||
limitations under the License. |
|||
@ -0,0 +1,23 @@ |
|||
Prometheus instrumentation library for Go applications |
|||
Copyright 2012-2015 The Prometheus Authors |
|||
|
|||
This product includes software developed at |
|||
SoundCloud Ltd. (http://soundcloud.com/). |
|||
|
|||
|
|||
The following components are included in this product: |
|||
|
|||
perks - a fork of https://github.com/bmizerany/perks |
|||
https://github.com/beorn7/perks |
|||
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein |
|||
See https://github.com/beorn7/perks/blob/master/README.md for license details. |
|||
|
|||
Go support for Protocol Buffers - Google's data interchange format |
|||
http://github.com/golang/protobuf/ |
|||
Copyright 2010 The Go Authors |
|||
See source code for license details. |
|||
|
|||
Support for streaming Protocol Buffer messages for the Go language (golang). |
|||
https://github.com/matttproud/golang_protobuf_extensions |
|||
Copyright 2013 Matt T. Proud |
|||
Licensed under the Apache License, Version 2.0 |
|||
@ -0,0 +1,46 @@ |
|||
# Prometheus Go client library |
|||
|
|||
[](https://travis-ci.org/prometheus/client_golang) |
|||
[](https://goreportcard.com/report/github.com/prometheus/client_golang) |
|||
|
|||
This is the [Go](http://golang.org) client library for |
|||
[Prometheus](http://prometheus.io). It has two separate parts, one for |
|||
instrumenting application code, and one for creating clients that talk to the |
|||
Prometheus HTTP API. |
|||
|
|||
## Instrumenting applications |
|||
|
|||
[](http://gocover.io/github.com/prometheus/client_golang/prometheus) [](https://godoc.org/github.com/prometheus/client_golang/prometheus) |
|||
|
|||
The |
|||
[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) |
|||
contains the instrumentation library. See the |
|||
[best practices section](http://prometheus.io/docs/practices/naming/) of the |
|||
Prometheus documentation to learn more about instrumenting applications. |
|||
|
|||
The |
|||
[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) |
|||
contains simple examples of instrumented code. |
|||
|
|||
## Client for the Prometheus HTTP API |
|||
|
|||
[](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) |
|||
|
|||
The |
|||
[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) |
|||
contains the client for the |
|||
[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you |
|||
to write Go applications that query time series data from a Prometheus server. |
|||
|
|||
## Where is `model`, `extraction`, and `text`? |
|||
|
|||
The `model` package has been moved to |
|||
[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). |
|||
|
|||
The `extraction` and `text` packages are now contained in |
|||
[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). |
|||
|
|||
## Contributing and community |
|||
|
|||
See the [contributing guidelines](CONTRIBUTING.md) and the |
|||
[Community section](http://prometheus.io/community/) of the homepage. |
|||
@ -0,0 +1 @@ |
|||
0.8.0 |
|||
@ -0,0 +1,348 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package prometheus provides bindings to the Prometheus HTTP API:
|
|||
// http://prometheus.io/docs/querying/api/
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net" |
|||
"net/http" |
|||
"net/url" |
|||
"path" |
|||
"strconv" |
|||
"strings" |
|||
"time" |
|||
|
|||
"github.com/prometheus/common/model" |
|||
"golang.org/x/net/context" |
|||
"golang.org/x/net/context/ctxhttp" |
|||
) |
|||
|
|||
const (
	// statusAPIError is the HTTP status code the Prometheus API uses for
	// requests that failed at the API level (422 Unprocessable Entity).
	statusAPIError = 422

	// apiPrefix is prepended to every endpoint path below.
	apiPrefix = "/api/v1"

	// Endpoint paths, relative to apiPrefix. ":name" segments are
	// placeholders substituted by Client.url.
	epQuery       = "/query"
	epQueryRange  = "/query_range"
	epLabelValues = "/label/:name/values"
	epSeries      = "/series"
)
|||
|
|||
// ErrorType models the different API error types.
type ErrorType string

// Possible values for ErrorType.
//
// Each constant is given an explicit ErrorType type: in the original
// declaration only ErrBadData was typed, leaving the remaining
// constants as untyped strings, which is inconsistent and defeats
// type-checking at use sites.
const (
	ErrBadData     ErrorType = "bad_data"
	ErrTimeout     ErrorType = "timeout"
	ErrCanceled    ErrorType = "canceled"
	ErrExec        ErrorType = "execution"
	ErrBadResponse ErrorType = "bad_response"
)
|||
|
|||
// Error is an error returned by the API.
|
|||
type Error struct { |
|||
Type ErrorType |
|||
Msg string |
|||
} |
|||
|
|||
func (e *Error) Error() string { |
|||
return fmt.Sprintf("%s: %s", e.Type, e.Msg) |
|||
} |
|||
|
|||
// CancelableTransport is like net.Transport but provides
// per-request cancelation functionality.
//
// *http.Transport satisfies this interface; any implementation must be
// able to abort an in-flight request via CancelRequest.
type CancelableTransport interface {
	http.RoundTripper
	CancelRequest(req *http.Request)
}
|||
|
|||
// DefaultTransport is used if no Transport is set in Config.
// It uses environment-based proxy settings and conservative dial and
// TLS-handshake timeouts.
var DefaultTransport CancelableTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	Dial: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).Dial,
	TLSHandshakeTimeout: 10 * time.Second,
}
|||
|
|||
// Config defines configuration parameters for a new client.
type Config struct {
	// The address of the Prometheus to connect to.
	Address string

	// Transport is used by the Client to drive HTTP requests. If not
	// provided, DefaultTransport will be used.
	Transport CancelableTransport
}

// transport returns the configured Transport, falling back to
// DefaultTransport when none was set.
func (cfg *Config) transport() CancelableTransport {
	if cfg.Transport == nil {
		return DefaultTransport
	}
	return cfg.Transport
}
|||
|
|||
// Client is the interface for an API client.
type Client interface {
	// url builds the URL for the given endpoint, substituting ":key"
	// placeholders in ep with the corresponding values from args.
	url(ep string, args map[string]string) *url.URL
	// do executes the request and returns the response along with its
	// fully-read body.
	do(context.Context, *http.Request) (*http.Response, []byte, error)
}
|||
|
|||
// New returns a new Client.
|
|||
//
|
|||
// It is safe to use the returned Client from multiple goroutines.
|
|||
func New(cfg Config) (Client, error) { |
|||
u, err := url.Parse(cfg.Address) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
u.Path = strings.TrimRight(u.Path, "/") + apiPrefix |
|||
|
|||
return &httpClient{ |
|||
endpoint: u, |
|||
transport: cfg.transport(), |
|||
}, nil |
|||
} |
|||
|
|||
// httpClient is the default Client implementation, issuing requests
// against a single Prometheus endpoint.
type httpClient struct {
	endpoint  *url.URL            // base URL, including the API prefix
	transport CancelableTransport // drives all HTTP requests
}
|||
|
|||
func (c *httpClient) url(ep string, args map[string]string) *url.URL { |
|||
p := path.Join(c.endpoint.Path, ep) |
|||
|
|||
for arg, val := range args { |
|||
arg = ":" + arg |
|||
p = strings.Replace(p, arg, val, -1) |
|||
} |
|||
|
|||
u := *c.endpoint |
|||
u.Path = p |
|||
|
|||
return &u |
|||
} |
|||
|
|||
// do executes req using the client's transport and returns the response
// together with its fully-read body. If ctx is canceled while the body
// is being read, the body is closed to abort the read and the context's
// error is returned (unless closing itself failed).
func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
	resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)

	// Ensure the body is closed on every return path below.
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	// Read the body in a separate goroutine so the read can be aborted
	// by closing the body when the context is canceled.
	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		close(done)
	}()

	select {
	case <-ctx.Done():
		// Unblock the reader, then wait for it to finish before
		// touching body/err (both are written by the goroutine).
		err = resp.Body.Close()
		<-done
		if err == nil {
			err = ctx.Err()
		}
	case <-done:
	}

	return resp, body, err
}
|||
|
|||
// apiClient wraps a regular client and processes successful API responses.
// Successful also includes responses that errored at the API level.
type apiClient struct {
	Client
}

// apiResponse is the JSON envelope returned by every API endpoint.
type apiResponse struct {
	Status    string          `json:"status"`
	Data      json.RawMessage `json:"data"`
	ErrorType ErrorType       `json:"errorType"`
	Error     string          `json:"error"`
}
|||
|
|||
// do performs the request via the wrapped Client and decodes the API
// response envelope. A 2xx code or statusAPIError (422) is considered a
// well-formed API response; anything else yields ErrBadResponse.
func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
	resp, body, err := c.Client.do(ctx, req)
	if err != nil {
		return resp, body, err
	}

	code := resp.StatusCode

	// Only 2xx and the API's error status carry a decodable envelope.
	if code/100 != 2 && code != statusAPIError {
		return resp, body, &Error{
			Type: ErrBadResponse,
			Msg:  fmt.Sprintf("bad response code %d", resp.StatusCode),
		}
	}

	var result apiResponse

	if err = json.Unmarshal(body, &result); err != nil {
		return resp, body, &Error{
			Type: ErrBadResponse,
			Msg:  err.Error(),
		}
	}

	// The error status code must coincide with Status == "error" in the
	// body; a mismatch is reported as a bad response.
	if (code == statusAPIError) != (result.Status == "error") {
		err = &Error{
			Type: ErrBadResponse,
			Msg:  "inconsistent body for response code",
		}
	}

	// A consistent API-level error is surfaced with its own type and
	// message (overwriting the err set above, which is nil here).
	if code == statusAPIError && result.Status == "error" {
		err = &Error{
			Type: result.ErrorType,
			Msg:  result.Error,
		}
	}

	// Note: the decoded Data payload is returned even when err is set.
	return resp, []byte(result.Data), err
}
|||
|
|||
// Range represents a sliced time range.
type Range struct {
	// The boundaries of the time range.
	Start, End time.Time
	// The maximum time between two slices within the boundaries.
	Step time.Duration
}
|||
|
|||
// queryResult contains result data for a query.
type queryResult struct {
	Type   model.ValueType `json:"resultType"`
	Result interface{}     `json:"result"`

	// The decoded value. Populated by UnmarshalJSON.
	v model.Value
}
|||
|
|||
func (qr *queryResult) UnmarshalJSON(b []byte) error { |
|||
v := struct { |
|||
Type model.ValueType `json:"resultType"` |
|||
Result json.RawMessage `json:"result"` |
|||
}{} |
|||
|
|||
err := json.Unmarshal(b, &v) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
|
|||
switch v.Type { |
|||
case model.ValScalar: |
|||
var sv model.Scalar |
|||
err = json.Unmarshal(v.Result, &sv) |
|||
qr.v = &sv |
|||
|
|||
case model.ValVector: |
|||
var vv model.Vector |
|||
err = json.Unmarshal(v.Result, &vv) |
|||
qr.v = vv |
|||
|
|||
case model.ValMatrix: |
|||
var mv model.Matrix |
|||
err = json.Unmarshal(v.Result, &mv) |
|||
qr.v = mv |
|||
|
|||
default: |
|||
err = fmt.Errorf("unexpected value type %q", v.Type) |
|||
} |
|||
return err |
|||
} |
|||
|
|||
// QueryAPI provides bindings to Prometheus's query API.
type QueryAPI interface {
	// Query performs a query for the given time.
	Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
	// QueryRange performs a query over the given range.
	QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
}
|||
|
|||
// NewQueryAPI returns a new QueryAPI for the client.
//
// It is safe to use the returned QueryAPI from multiple goroutines.
func NewQueryAPI(c Client) QueryAPI {
	// Wrap c in an apiClient so API-level responses are decoded uniformly.
	return &httpQueryAPI{client: apiClient{c}}
}
|||
|
|||
// httpQueryAPI implements QueryAPI on top of a Client.
type httpQueryAPI struct {
	client Client
}
|||
|
|||
func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { |
|||
u := h.client.url(epQuery, nil) |
|||
q := u.Query() |
|||
|
|||
q.Set("query", query) |
|||
q.Set("time", ts.Format(time.RFC3339Nano)) |
|||
|
|||
u.RawQuery = q.Encode() |
|||
|
|||
req, _ := http.NewRequest("GET", u.String(), nil) |
|||
|
|||
_, body, err := h.client.do(ctx, req) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
var qres queryResult |
|||
err = json.Unmarshal(body, &qres) |
|||
|
|||
return model.Value(qres.v), err |
|||
} |
|||
|
|||
func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { |
|||
u := h.client.url(epQueryRange, nil) |
|||
q := u.Query() |
|||
|
|||
var ( |
|||
start = r.Start.Format(time.RFC3339Nano) |
|||
end = r.End.Format(time.RFC3339Nano) |
|||
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) |
|||
) |
|||
|
|||
q.Set("query", query) |
|||
q.Set("start", start) |
|||
q.Set("end", end) |
|||
q.Set("step", step) |
|||
|
|||
u.RawQuery = q.Encode() |
|||
|
|||
req, _ := http.NewRequest("GET", u.String(), nil) |
|||
|
|||
_, body, err := h.client.do(ctx, req) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
var qres queryResult |
|||
err = json.Unmarshal(body, &qres) |
|||
|
|||
return model.Value(qres.v), err |
|||
} |
|||
@ -0,0 +1,453 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"fmt" |
|||
"net/http" |
|||
"net/url" |
|||
"reflect" |
|||
"testing" |
|||
"time" |
|||
|
|||
"github.com/prometheus/common/model" |
|||
"golang.org/x/net/context" |
|||
) |
|||
|
|||
func TestConfig(t *testing.T) { |
|||
c := Config{} |
|||
if c.transport() != DefaultTransport { |
|||
t.Fatalf("expected default transport for nil Transport field") |
|||
} |
|||
} |
|||
|
|||
// TestClientURL checks httpClient.url's path joining and ":param"
// substitution, and that apiClient delegates to the same logic.
func TestClientURL(t *testing.T) {
	tests := []struct {
		address  string
		endpoint string
		args     map[string]string
		expected string
	}{
		// Plain joins: trailing/leading slashes and bare endpoints.
		{
			address:  "http://localhost:9090",
			endpoint: "/test",
			expected: "http://localhost:9090/test",
		},
		{
			address:  "http://localhost",
			endpoint: "/test",
			expected: "http://localhost/test",
		},
		{
			address:  "http://localhost:9090",
			endpoint: "test",
			expected: "http://localhost:9090/test",
		},
		{
			address:  "http://localhost:9090/prefix",
			endpoint: "/test",
			expected: "http://localhost:9090/prefix/test",
		},
		{
			address:  "https://localhost:9090/",
			endpoint: "/test/",
			expected: "https://localhost:9090/test",
		},
		// Placeholder substitution, including repeated and multiple params.
		{
			address:  "http://localhost:9090",
			endpoint: "/test/:param",
			args: map[string]string{
				"param": "content",
			},
			expected: "http://localhost:9090/test/content",
		},
		{
			address:  "http://localhost:9090",
			endpoint: "/test/:param/more/:param",
			args: map[string]string{
				"param": "content",
			},
			expected: "http://localhost:9090/test/content/more/content",
		},
		{
			address:  "http://localhost:9090",
			endpoint: "/test/:param/more/:foo",
			args: map[string]string{
				"param": "content",
				"foo":   "bar",
			},
			expected: "http://localhost:9090/test/content/more/bar",
		},
		// An arg with no matching placeholder leaves the path untouched.
		{
			address:  "http://localhost:9090",
			endpoint: "/test/:param",
			args: map[string]string{
				"nonexistant": "content",
			},
			expected: "http://localhost:9090/test/:param",
		},
	}

	for _, test := range tests {
		ep, err := url.Parse(test.address)
		if err != nil {
			t.Fatal(err)
		}

		hclient := &httpClient{
			endpoint:  ep,
			transport: DefaultTransport,
		}

		u := hclient.url(test.endpoint, test.args)
		if u.String() != test.expected {
			t.Errorf("unexpected result: got %s, want %s", u, test.expected)
			continue
		}

		// The apiClient must return exactly the same result as the httpClient.
		aclient := &apiClient{hclient}

		u = aclient.url(test.endpoint, test.args)
		if u.String() != test.expected {
			t.Errorf("unexpected result: got %s, want %s", u, test.expected)
		}
	}
}
|||
|
|||
// testClient is a Client stub that serves queued apiClientTest fixtures
// and checks that ctx/req are forwarded unchanged.
type testClient struct {
	*testing.T

	ch  chan apiClientTest // next fixture to serve from do
	req *http.Request      // the exact request do must receive
}
|||
|
|||
// apiClientTest describes one canned response for testClient.do and the
// outcome expected from apiClient.do.
type apiClientTest struct {
	code     int         // HTTP status code to return
	response interface{} // body: a string is used verbatim, anything else is JSON-marshaled
	expected string      // expected decoded Data payload
	err      *Error      // expected error, nil if none
}
|||
|
|||
// url is not exercised by these tests and always returns nil.
func (c *testClient) url(ep string, args map[string]string) *url.URL {
	return nil
}
|||
|
|||
// do serves the next queued apiClientTest as a fake HTTP response,
// verifying first that the context and request were passed through.
func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
	if ctx == nil {
		c.Fatalf("context was not passed down")
	}
	if req != c.req {
		c.Fatalf("request was not passed down")
	}

	test := <-c.ch

	var b []byte
	var err error

	// A string response is used verbatim (e.g. to simulate invalid
	// JSON); anything else is marshaled to JSON.
	switch v := test.response.(type) {
	case string:
		b = []byte(v)
	default:
		b, err = json.Marshal(v)
		if err != nil {
			c.Fatal(err)
		}
	}

	resp := &http.Response{
		StatusCode: test.code,
	}

	return resp, b, nil
}
|||
|
|||
func TestAPIClientDo(t *testing.T) { |
|||
tests := []apiClientTest{ |
|||
{ |
|||
response: &apiResponse{ |
|||
Status: "error", |
|||
Data: json.RawMessage(`null`), |
|||
ErrorType: ErrBadData, |
|||
Error: "failed", |
|||
}, |
|||
err: &Error{ |
|||
Type: ErrBadData, |
|||
Msg: "failed", |
|||
}, |
|||
code: statusAPIError, |
|||
expected: `null`, |
|||
}, |
|||
{ |
|||
response: &apiResponse{ |
|||
Status: "error", |
|||
Data: json.RawMessage(`"test"`), |
|||
ErrorType: ErrTimeout, |
|||
Error: "timed out", |
|||
}, |
|||
err: &Error{ |
|||
Type: ErrTimeout, |
|||
Msg: "timed out", |
|||
}, |
|||
code: statusAPIError, |
|||
expected: `test`, |
|||
}, |
|||
{ |
|||
response: "bad json", |
|||
err: &Error{ |
|||
Type: ErrBadResponse, |
|||
Msg: "bad response code 400", |
|||
}, |
|||
code: http.StatusBadRequest, |
|||
}, |
|||
{ |
|||
response: "bad json", |
|||
err: &Error{ |
|||
Type: ErrBadResponse, |
|||
Msg: "invalid character 'b' looking for beginning of value", |
|||
}, |
|||
code: statusAPIError, |
|||
}, |
|||
{ |
|||
response: &apiResponse{ |
|||
Status: "success", |
|||
Data: json.RawMessage(`"test"`), |
|||
}, |
|||
err: &Error{ |
|||
Type: ErrBadResponse, |
|||
Msg: "inconsistent body for response code", |
|||
}, |
|||
code: statusAPIError, |
|||
}, |
|||
{ |
|||
response: &apiResponse{ |
|||
Status: "success", |
|||
Data: json.RawMessage(`"test"`), |
|||
ErrorType: ErrTimeout, |
|||
Error: "timed out", |
|||
}, |
|||
err: &Error{ |
|||
Type: ErrBadResponse, |
|||
Msg: "inconsistent body for response code", |
|||
}, |
|||
code: statusAPIError, |
|||
}, |
|||
{ |
|||
response: &apiResponse{ |
|||
Status: "error", |
|||
Data: json.RawMessage(`"test"`), |
|||
ErrorType: ErrTimeout, |
|||
Error: "timed out", |
|||
}, |
|||
err: &Error{ |
|||
Type: ErrBadResponse, |
|||
Msg: "inconsistent body for response code", |
|||
}, |
|||
code: http.StatusOK, |
|||
}, |
|||
} |
|||
|
|||
tc := &testClient{ |
|||
T: t, |
|||
ch: make(chan apiClientTest, 1), |
|||
req: &http.Request{}, |
|||
} |
|||
client := &apiClient{tc} |
|||
|
|||
for _, test := range tests { |
|||
|
|||
tc.ch <- test |
|||
|
|||
_, body, err := client.do(context.Background(), tc.req) |
|||
|
|||
if test.err != nil { |
|||
if err == nil { |
|||
t.Errorf("expected error %q but got none", test.err) |
|||
continue |
|||
} |
|||
if test.err.Error() != err.Error() { |
|||
t.Errorf("unexpected error: want %q, got %q", test.err, err) |
|||
} |
|||
continue |
|||
} |
|||
if err != nil { |
|||
t.Errorf("unexpeceted error %s", err) |
|||
continue |
|||
} |
|||
|
|||
want, got := test.expected, string(body) |
|||
if want != got { |
|||
t.Errorf("unexpected body: want %q, got %q", want, got) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// apiTestClient is a test double for the API client interface. It embeds
// *testing.T so expectation failures can be reported directly, and holds the
// currently running table-test case in curTest.
type apiTestClient struct {
	*testing.T
	curTest apiTest
}
|||
|
|||
// apiTest describes one table-driven API test case: how to invoke the client
// (do), what the fake transport returns (inErr/inRes), what request the client
// is expected to issue (reqPath/reqParam/reqMethod), and the expected outcome
// (res/err).
//
// NOTE(review): reqParam is populated by the test cases but never checked by
// apiTestClient.do — confirm whether parameter verification was intended.
type apiTest struct {
	do    func() (interface{}, error) // invokes the API under test
	inErr error                       // error the fake transport injects
	inRes interface{}                 // payload the fake transport returns (JSON-marshaled)

	reqPath   string     // expected request URL path
	reqParam  url.Values // expected request parameters (currently unverified)
	reqMethod string     // expected HTTP method
	res       interface{} // expected successful result
	err       error       // expected error, if any
}
|||
|
|||
// url builds a fixed test URL for the given endpoint. The args map is ignored
// here — presumably the real client substitutes path parameters; this stub
// does not need to (TODO confirm against the production implementation).
func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
	u := &url.URL{
		Host: "test:9090",
		Path: apiPrefix + ep,
	}
	return u
}
|||
|
|||
// do implements the client interface for tests. Instead of performing a real
// HTTP round trip it checks the outgoing request against the expectations in
// curTest and replies with the canned response body and injected error.
func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {

	test := c.curTest

	if req.URL.Path != test.reqPath {
		c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
	}
	if req.Method != test.reqMethod {
		c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
	}

	// The canned payload is delivered exactly as the real transport would:
	// as a JSON-encoded body.
	b, err := json.Marshal(test.inRes)
	if err != nil {
		c.Fatal(err)
	}

	// An injected error is signalled via the API error status code;
	// otherwise the request is treated as successful.
	resp := &http.Response{}
	if test.inErr != nil {
		resp.StatusCode = statusAPIError
	} else {
		resp.StatusCode = http.StatusOK
	}

	return resp, b, test.inErr
}
|||
|
|||
// TestAPIs drives the high-level query API methods (Query, QueryRange)
// through the fake client and verifies both the request each method builds
// and how it propagates results and transport errors.
func TestAPIs(t *testing.T) {

	testTime := time.Now()

	client := &apiTestClient{T: t}

	queryAPI := &httpQueryAPI{
		client: client,
	}

	// The do-closures defer the actual call so each table entry can carry
	// "how to invoke the API" as data.
	doQuery := func(q string, ts time.Time) func() (interface{}, error) {
		return func() (interface{}, error) {
			return queryAPI.Query(context.Background(), q, ts)
		}
	}

	doQueryRange := func(q string, rng Range) func() (interface{}, error) {
		return func() (interface{}, error) {
			return queryAPI.QueryRange(context.Background(), q, rng)
		}
	}

	queryTests := []apiTest{
		// Successful instant query: scalar result is passed through.
		{
			do: doQuery("2", testTime),
			inRes: &queryResult{
				Type: model.ValScalar,
				Result: &model.Scalar{
					Value:     2,
					Timestamp: model.TimeFromUnix(testTime.Unix()),
				},
			},

			reqMethod: "GET",
			reqPath:   "/api/v1/query",
			reqParam: url.Values{
				"query": []string{"2"},
				"time":  []string{testTime.Format(time.RFC3339Nano)},
			},
			res: &model.Scalar{
				Value:     2,
				Timestamp: model.TimeFromUnix(testTime.Unix()),
			},
		},
		// Transport error on an instant query is surfaced unchanged.
		{
			do:    doQuery("2", testTime),
			inErr: fmt.Errorf("some error"),

			reqMethod: "GET",
			reqPath:   "/api/v1/query",
			reqParam: url.Values{
				"query": []string{"2"},
				"time":  []string{testTime.Format(time.RFC3339Nano)},
			},
			err: fmt.Errorf("some error"),
		},

		// Transport error on a range query is surfaced unchanged.
		{
			do: doQueryRange("2", Range{
				Start: testTime.Add(-time.Minute),
				End:   testTime,
				Step:  time.Minute,
			}),
			inErr: fmt.Errorf("some error"),

			reqMethod: "GET",
			reqPath:   "/api/v1/query_range",
			reqParam: url.Values{
				"query": []string{"2"},
				"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
				"end":   []string{testTime.Format(time.RFC3339Nano)},
				"step":  []string{time.Minute.String()},
			},
			err: fmt.Errorf("some error"),
		},
	}

	var tests []apiTest
	tests = append(tests, queryTests...)

	for _, test := range tests {
		// The fake client reads expectations from curTest during do().
		client.curTest = test

		res, err := test.do()

		if test.err != nil {
			if err == nil {
				t.Errorf("expected error %q but got none", test.err)
				continue
			}
			// Errors are compared by message since fmt.Errorf values
			// are never identical instances.
			if err.Error() != test.err.Error() {
				t.Errorf("unexpected error: want %s, got %s", test.err, err)
			}
			continue
		}
		if err != nil {
			t.Errorf("unexpected error: %s", err)
			continue
		}

		if !reflect.DeepEqual(res, test.res) {
			t.Errorf("unexpected result: want %v, got %v", test.res, res)
		}
	}
}
|||
@ -0,0 +1,103 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// A simple example exposing fictional RPC latencies with different types of
|
|||
// random distributions (uniform, normal, and exponential) as Prometheus
|
|||
// metrics.
|
|||
package main |
|||
|
|||
import (
	"flag"
	"log"
	"math"
	"math/rand"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)
|||
|
|||
var ( |
|||
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") |
|||
uniformDomain = flag.Float64("uniform.domain", 200, "The domain for the uniform distribution.") |
|||
normDomain = flag.Float64("normal.domain", 200, "The domain for the normal distribution.") |
|||
normMean = flag.Float64("normal.mean", 10, "The mean for the normal distribution.") |
|||
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") |
|||
) |
|||
|
|||
var (
	// Create a summary to track fictional interservice RPC latencies for three
	// distinct services with different latency distributions. These services are
	// differentiated via a "service" label.
	rpcDurations = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "rpc_durations_microseconds",
			Help: "RPC latency distributions.",
		},
		[]string{"service"},
	)
	// The same as above, but now as a histogram, and only for the normal
	// distribution. The buckets are targeted to the parameters of the
	// normal distribution, with 20 buckets centered on the mean, each
	// half-sigma wide. Note that the flag values are read here at package
	// initialization, i.e. before flag.Parse() runs, so the defaults are
	// always used for the bucket layout.
	rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "rpc_durations_histogram_microseconds",
		Help:    "RPC latency distributions.",
		Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
	})
)
|||
|
|||
func init() {
	// Register the summary and the histogram with Prometheus's default registry.
	// MustRegister panics on registration errors (e.g. duplicate metric names),
	// which is acceptable at program startup.
	prometheus.MustRegister(rpcDurations)
	prometheus.MustRegister(rpcDurationsHistogram)
}
|||
|
|||
func main() { |
|||
flag.Parse() |
|||
|
|||
start := time.Now() |
|||
|
|||
oscillationFactor := func() float64 { |
|||
return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) |
|||
} |
|||
|
|||
// Periodically record some sample latencies for the three services.
|
|||
go func() { |
|||
for { |
|||
v := rand.Float64() * *uniformDomain |
|||
rpcDurations.WithLabelValues("uniform").Observe(v) |
|||
time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) |
|||
} |
|||
}() |
|||
|
|||
go func() { |
|||
for { |
|||
v := (rand.NormFloat64() * *normDomain) + *normMean |
|||
rpcDurations.WithLabelValues("normal").Observe(v) |
|||
rpcDurationsHistogram.Observe(v) |
|||
time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) |
|||
} |
|||
}() |
|||
|
|||
go func() { |
|||
for { |
|||
v := rand.ExpFloat64() |
|||
rpcDurations.WithLabelValues("exponential").Observe(v) |
|||
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) |
|||
} |
|||
}() |
|||
|
|||
// Expose the registered metrics via HTTP.
|
|||
http.Handle("/metrics", prometheus.Handler()) |
|||
http.ListenAndServe(*addr, nil) |
|||
} |
|||
@ -0,0 +1,30 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// A minimal example of how to include Prometheus instrumentation.
|
|||
package main |
|||
|
|||
import (
	"flag"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)
|||
|
|||
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") |
|||
|
|||
func main() { |
|||
flag.Parse() |
|||
http.Handle("/metrics", prometheus.Handler()) |
|||
http.ListenAndServe(*addr, nil) |
|||
} |
|||
@ -0,0 +1 @@ |
|||
See the [package documentation on godoc](https://godoc.org/github.com/prometheus/client_golang/prometheus).
|||
@ -0,0 +1,183 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"sync" |
|||
"testing" |
|||
) |
|||
|
|||
// BenchmarkCounterWithLabelValues measures Inc on a CounterVec child when the
// child is re-resolved via WithLabelValues on every iteration.
func BenchmarkCounterWithLabelValues(b *testing.B) {
	m := NewCounterVec(
		CounterOpts{
			Name: "benchmark_counter",
			Help: "A counter to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.WithLabelValues("eins", "zwei", "drei").Inc()
	}
}
|||
|
|||
// BenchmarkCounterWithLabelValuesConcurrent measures the same lookup+Inc path
// under contention: ten goroutines each perform b.N/10 increments.
// Note: for b.N < 10 the integer division makes each goroutine do no work.
func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {
	m := NewCounterVec(
		CounterOpts{
			Name: "benchmark_counter",
			Help: "A counter to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	wg := sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			for j := 0; j < b.N/10; j++ {
				m.WithLabelValues("eins", "zwei", "drei").Inc()
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
|||
|
|||
// BenchmarkCounterWithMappedLabels measures the cost of the With(Labels) map
// lookup path, constructing the Labels map anew on every iteration.
func BenchmarkCounterWithMappedLabels(b *testing.B) {
	m := NewCounterVec(
		CounterOpts{
			Name: "benchmark_counter",
			Help: "A counter to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
	}
}
|||
|
|||
// BenchmarkCounterWithPreparedMappedLabels is the With(Labels) path with the
// Labels map allocated once outside the loop, isolating the lookup cost from
// the map-construction cost.
func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
	m := NewCounterVec(
		CounterOpts{
			Name: "benchmark_counter",
			Help: "A counter to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
	for i := 0; i < b.N; i++ {
		m.With(labels).Inc()
	}
}
|||
|
|||
// BenchmarkCounterNoLabels measures a bare Counter Inc with no vector lookup,
// serving as the baseline for the label-lookup benchmarks above.
func BenchmarkCounterNoLabels(b *testing.B) {
	m := NewCounter(CounterOpts{
		Name: "benchmark_counter",
		Help: "A counter to benchmark it.",
	})
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Inc()
	}
}
|||
|
|||
// BenchmarkGaugeWithLabelValues measures Set on a GaugeVec child resolved via
// WithLabelValues on every iteration.
func BenchmarkGaugeWithLabelValues(b *testing.B) {
	m := NewGaugeVec(
		GaugeOpts{
			Name: "benchmark_gauge",
			Help: "A gauge to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
	}
}
|||
|
|||
// BenchmarkGaugeNoLabels measures a bare Gauge Set, the baseline for the
// gauge label-lookup benchmark.
func BenchmarkGaugeNoLabels(b *testing.B) {
	m := NewGauge(GaugeOpts{
		Name: "benchmark_gauge",
		Help: "A gauge to benchmark it.",
	})
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Set(3.1415)
	}
}
|||
|
|||
// BenchmarkSummaryWithLabelValues measures Observe on a SummaryVec child
// resolved via WithLabelValues on every iteration.
func BenchmarkSummaryWithLabelValues(b *testing.B) {
	m := NewSummaryVec(
		SummaryOpts{
			Name: "benchmark_summary",
			Help: "A summary to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
	}
}
|||
|
|||
// BenchmarkSummaryNoLabels measures a bare Summary Observe, the baseline for
// the summary label-lookup benchmark.
func BenchmarkSummaryNoLabels(b *testing.B) {
	m := NewSummary(SummaryOpts{
		Name: "benchmark_summary",
		Help: "A summary to benchmark it.",
	},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Observe(3.1415)
	}
}
|||
|
|||
// BenchmarkHistogramWithLabelValues measures Observe on a HistogramVec child
// resolved via WithLabelValues on every iteration.
func BenchmarkHistogramWithLabelValues(b *testing.B) {
	m := NewHistogramVec(
		HistogramOpts{
			Name: "benchmark_histogram",
			Help: "A histogram to benchmark it.",
		},
		[]string{"one", "two", "three"},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
	}
}
|||
|
|||
// BenchmarkHistogramNoLabels measures a bare Histogram Observe, the baseline
// for the histogram label-lookup benchmark.
func BenchmarkHistogramNoLabels(b *testing.B) {
	m := NewHistogram(HistogramOpts{
		Name: "benchmark_histogram",
		Help: "A histogram to benchmark it.",
	},
	)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.Observe(3.1415)
	}
}
|||
@ -0,0 +1,75 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
// Collector is the interface implemented by anything that can be used by
|
|||
// Prometheus to collect metrics. A Collector has to be registered for
|
|||
// collection. See Registerer.Register.
|
|||
//
|
|||
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
|||
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
|||
// namely itself). An implementer of Collector may, however, collect multiple
|
|||
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
|||
// for collectors already implemented in this library are the metric vectors
|
|||
// (i.e. collection of multiple instances of the same Metric but with different
|
|||
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
|||
type Collector interface {
	// Describe sends the super-set of all possible descriptors of metrics
	// collected by this Collector to the provided channel and returns once
	// the last descriptor has been sent. The sent descriptors fulfill the
	// consistency and uniqueness requirements described in the Desc
	// documentation. (It is valid if one and the same Collector sends
	// duplicate descriptors. Those duplicates are simply ignored. However,
	// two different Collectors must not send duplicate descriptors.) This
	// method idempotently sends the same descriptors throughout the
	// lifetime of the Collector. If a Collector encounters an error while
	// executing this method, it must send an invalid descriptor (created
	// with NewInvalidDesc) to signal the error to the registry.
	Describe(chan<- *Desc)
	// Collect is called by the Prometheus registry when collecting
	// metrics. The implementation sends each collected metric via the
	// provided channel and returns once the last metric has been sent. The
	// descriptor of each sent metric is one of those returned by
	// Describe. Returned metrics that share the same descriptor must differ
	// in their variable label values. This method may be called
	// concurrently and must therefore be implemented in a concurrency safe
	// way. Blocking occurs at the expense of total performance of rendering
	// all registered metrics. Ideally, Collector implementations support
	// concurrent readers.
	Collect(chan<- Metric)
}
|||
|
|||
// selfCollector implements Collector for a single Metric so that the Metric
|
|||
// collects itself. Add it as an anonymous field to a struct that implements
|
|||
// Metric, and call init with the Metric itself as an argument.
|
|||
type selfCollector struct {
	// self is the single Metric this collector reports from Describe and
	// Collect. Set via init.
	self Metric
}
|||
|
|||
// init provides the selfCollector with a reference to the metric it is supposed
|
|||
// to collect. It is usually called within the factory function to create a
|
|||
// metric. See example.
|
|||
func (c *selfCollector) init(self Metric) {
	// Store the metric so Describe and Collect can emit it.
	c.self = self
}
|||
|
|||
// Describe implements Collector by sending the single metric's descriptor.
func (c *selfCollector) Describe(ch chan<- *Desc) {
	ch <- c.self.Desc()
}
|||
|
|||
// Collect implements Collector by sending the metric itself.
func (c *selfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}
|||
@ -0,0 +1,172 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"errors" |
|||
) |
|||
|
|||
// Counter is a Metric that represents a single numerical value that only ever
|
|||
// goes up. That implies that it cannot be used to count items whose number can
|
|||
// also go down, e.g. the number of currently running goroutines. Those
|
|||
// "counters" are represented by Gauges.
|
|||
//
|
|||
// A Counter is typically used to count requests served, tasks completed, errors
|
|||
// occurred, etc.
|
|||
//
|
|||
// To create Counter instances, use NewCounter.
|
|||
type Counter interface {
	Metric
	Collector

	// Set is used to set the Counter to an arbitrary value. It is only used
	// if you have to transfer a value from an external counter into this
	// Prometheus metric. Do not use it for regular handling of a
	// Prometheus counter (as it can be used to break the contract of
	// monotonically increasing values).
	//
	// Deprecated: Use NewConstMetric to create a counter for an external
	// value. A Counter should never be set.
	Set(float64)
	// Inc increments the counter by 1.
	Inc()
	// Add adds the given value to the counter. It panics if the value is <
	// 0.
	Add(float64)
}
|||
|
|||
// CounterOpts is an alias for Opts. See there for doc comments. Counters need
// no counter-specific options, so the generic Opts type is reused unchanged.
type CounterOpts Opts
|||
|
|||
// NewCounter creates a new Counter based on the provided CounterOpts.
func NewCounter(opts CounterOpts) Counter {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil, // a plain counter has no variable labels
		opts.ConstLabels,
	)
	result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
	result.init(result) // Init self-collection.
	return result
}
|||
|
|||
// counter is the internal Counter implementation. The embedded value supplies
// the underlying metric machinery; counter only overrides Add to reject
// negative increments.
type counter struct {
	value
}
|||
|
|||
func (c *counter) Add(v float64) { |
|||
if v < 0 { |
|||
panic(errors.New("counter cannot decrease in value")) |
|||
} |
|||
c.value.Add(v) |
|||
} |
|||
|
|||
// CounterVec is a Collector that bundles a set of Counters that all share the
|
|||
// same Desc, but have different values for their variable labels. This is used
|
|||
// if you want to count the same thing partitioned by various dimensions
|
|||
// (e.g. number of HTTP requests, partitioned by response code and
|
|||
// method). Create instances with NewCounterVec.
|
|||
//
|
|||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
|||
// detailed documentation.
|
|||
type CounterVec struct {
	// MetricVec provides the generic vector machinery; CounterVec adds
	// Counter-typed wrappers around its lookup methods.
	*MetricVec
}
|||
|
|||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
|||
// partitioned by the given label names. At least one label name must be
|
|||
// provided.
|
|||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		labelNames,
		opts.ConstLabels,
	)
	return &CounterVec{
		// The factory closure builds one counter child per distinct
		// combination of label values.
		MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
			result := &counter{value: value{
				desc:       desc,
				valType:    CounterValue,
				labelPairs: makeLabelPairs(desc, lvs),
			}}
			result.init(result) // Init self-collection.
			return result
		}),
	}
}
|||
|
|||
// GetMetricWithLabelValues replaces the method of the same name in
|
|||
// MetricVec. The difference is that this method returns a Counter and not a
|
|||
// Metric so that no type conversion is required.
|
|||
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { |
|||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) |
|||
if metric != nil { |
|||
return metric.(Counter), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|||
// difference is that this method returns a Counter and not a Metric so that no
|
|||
// type conversion is required.
|
|||
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { |
|||
metric, err := m.MetricVec.GetMetricWith(labels) |
|||
if metric != nil { |
|||
return metric.(Counter), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
|||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
|||
// error, WithLabelValues allows shortcuts like
|
|||
// myVec.WithLabelValues("404", "GET").Add(42)
|
|||
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
	// The vec's factory only ever produces counters, so the assertion is safe.
	return m.MetricVec.WithLabelValues(lvs...).(Counter)
}
|||
|
|||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
|||
// returned an error. By not returning an error, With allows shortcuts like
|
|||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
|||
func (m *CounterVec) With(labels Labels) Counter {
	// The vec's factory only ever produces counters, so the assertion is safe.
	return m.MetricVec.With(labels).(Counter)
}
|||
|
|||
// CounterFunc is a Counter whose value is determined at collect time by calling a
|
|||
// provided function.
|
|||
//
|
|||
// To create CounterFunc instances, use NewCounterFunc.
|
|||
type CounterFunc interface {
	// CounterFunc adds no methods of its own; the value is produced by the
	// function supplied to NewCounterFunc at collect time.
	Metric
	Collector
}
|||
|
|||
// NewCounterFunc creates a new CounterFunc based on the provided
|
|||
// CounterOpts. The value reported is determined by calling the given function
|
|||
// from within the Write method. Take into account that metric collection may
|
|||
// happen concurrently. If that results in concurrent calls to Write, like in
|
|||
// the case where a CounterFunc is directly registered with Prometheus, the
|
|||
// provided function must be concurrency-safe. The function should also honor
|
|||
// the contract for a Counter (values only go up, not down), but compliance will
|
|||
// not be checked.
|
|||
// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
	return newValueFunc(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil, // no variable labels
		opts.ConstLabels,
	), CounterValue, function)
}
|||
@ -0,0 +1,58 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"math" |
|||
"testing" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// TestCounterAdd exercises Inc, Add, the negative-Add panic, and the DTO
// serialization of a counter with constant labels.
func TestCounterAdd(t *testing.T) {
	counter := NewCounter(CounterOpts{
		Name:        "test",
		Help:        "test help",
		ConstLabels: Labels{"a": "1", "b": "2"},
	}).(*counter)
	counter.Inc()
	// The raw bit pattern in valBits is inspected directly instead of going
	// through the public API.
	if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
		t.Errorf("Expected %f, got %f.", expected, got)
	}
	counter.Add(42)
	if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
		t.Errorf("Expected %f, got %f.", expected, got)
	}

	// Negative Add must panic; decreaseCounter converts the panic to an error.
	if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
		t.Errorf("Expected error %q, got %q.", expected, got)
	}

	m := &dto.Metric{}
	// NOTE(review): the error return of Write is ignored here — a failing
	// Write would surface only as a mismatched m.String() below.
	counter.Write(m)

	if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
		t.Errorf("expected %q, got %q", expected, got)
	}
}
|||
|
|||
// decreaseCounter calls Add with a negative value and converts the expected
// panic back into an error via recover. It returns nil if no panic occurred.
func decreaseCounter(c *counter) (err error) {
	defer func() {
		if e := recover(); e != nil {
			// Add panics with an error value; assert accordingly.
			err = e.(error)
		}
	}()
	c.Add(-1)
	return nil
}
|||
@ -0,0 +1,205 @@ |
|||
// Copyright 2016 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"regexp" |
|||
"sort" |
|||
"strings" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
var ( |
|||
metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) |
|||
labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") |
|||
) |
|||
|
|||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
|||
|
|||
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
|||
|
|||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
|||
// the immutable meta-data of a Metric. The normal Metric implementations
|
|||
// included in this package manage their Desc under the hood. Users only have to
|
|||
// deal with Desc if they use advanced features like the ExpvarCollector or
|
|||
// custom Collectors and Metrics.
|
|||
//
|
|||
// Descriptors registered with the same registry have to fulfill certain
|
|||
// consistency and uniqueness criteria if they share the same fully-qualified
|
|||
// name: They must have the same help string and the same label names (aka label
|
|||
// dimensions) in each, constLabels and variableLabels, but they must differ in
|
|||
// the values of the constLabels.
|
|||
//
|
|||
// Descriptors that share the same fully-qualified names and the same label
|
|||
// values of their constLabels are considered equal.
|
|||
//
|
|||
// Use NewDesc to create new Desc instances.
|
|||
type Desc struct {
	// fqName has been built from Namespace, Subsystem, and Name.
	fqName string
	// help provides some helpful information about this metric.
	help string
	// constLabelPairs contains precalculated DTO label pairs based on
	// the constant labels.
	constLabelPairs []*dto.LabelPair
	// variableLabels contains names of labels for which the metric
	// maintains variable values.
	variableLabels []string
	// id is a hash of the values of the ConstLabels and fqName. This
	// must be unique among all registered descriptors and can therefore be
	// used as an identifier of the descriptor.
	id uint64
	// dimHash is a hash of the label names (preset and variable) and the
	// Help string. Each Desc with the same fqName must have the same
	// dimHash.
	dimHash uint64
	// err is an error that occurred during construction. It is reported on
	// registration time.
	err error
}
|||
|
|||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
|||
// and will be reported on registration time. variableLabels and constLabels can
|
|||
// be nil if no such labels should be set. fqName and help must not be empty.
|
|||
//
|
|||
// variableLabels only contain the label names. Their label values are variable
|
|||
// and therefore not part of the Desc. (They are managed within the Metric.)
|
|||
//
|
|||
// For constLabels, the label values are constant. Therefore, they are fully
|
|||
// specified in the Desc. See the Opts documentation for the implications of
|
|||
// constant labels.
|
|||
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { |
|||
d := &Desc{ |
|||
fqName: fqName, |
|||
help: help, |
|||
variableLabels: variableLabels, |
|||
} |
|||
if help == "" { |
|||
d.err = errors.New("empty help string") |
|||
return d |
|||
} |
|||
if !metricNameRE.MatchString(fqName) { |
|||
d.err = fmt.Errorf("%q is not a valid metric name", fqName) |
|||
return d |
|||
} |
|||
// labelValues contains the label values of const labels (in order of
|
|||
// their sorted label names) plus the fqName (at position 0).
|
|||
labelValues := make([]string, 1, len(constLabels)+1) |
|||
labelValues[0] = fqName |
|||
labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) |
|||
labelNameSet := map[string]struct{}{} |
|||
// First add only the const label names and sort them...
|
|||
for labelName := range constLabels { |
|||
if !checkLabelName(labelName) { |
|||
d.err = fmt.Errorf("%q is not a valid label name", labelName) |
|||
return d |
|||
} |
|||
labelNames = append(labelNames, labelName) |
|||
labelNameSet[labelName] = struct{}{} |
|||
} |
|||
sort.Strings(labelNames) |
|||
// ... so that we can now add const label values in the order of their names.
|
|||
for _, labelName := range labelNames { |
|||
labelValues = append(labelValues, constLabels[labelName]) |
|||
} |
|||
// Now add the variable label names, but prefix them with something that
|
|||
// cannot be in a regular label name. That prevents matching the label
|
|||
// dimension with a different mix between preset and variable labels.
|
|||
for _, labelName := range variableLabels { |
|||
if !checkLabelName(labelName) { |
|||
d.err = fmt.Errorf("%q is not a valid label name", labelName) |
|||
return d |
|||
} |
|||
labelNames = append(labelNames, "$"+labelName) |
|||
labelNameSet[labelName] = struct{}{} |
|||
} |
|||
if len(labelNames) != len(labelNameSet) { |
|||
d.err = errors.New("duplicate label names") |
|||
return d |
|||
} |
|||
vh := hashNew() |
|||
for _, val := range labelValues { |
|||
vh = hashAdd(vh, val) |
|||
vh = hashAddByte(vh, separatorByte) |
|||
} |
|||
d.id = vh |
|||
// Sort labelNames so that order doesn't matter for the hash.
|
|||
sort.Strings(labelNames) |
|||
// Now hash together (in this order) the help string and the sorted
|
|||
// label names.
|
|||
lh := hashNew() |
|||
lh = hashAdd(lh, help) |
|||
lh = hashAddByte(lh, separatorByte) |
|||
for _, labelName := range labelNames { |
|||
lh = hashAdd(lh, labelName) |
|||
lh = hashAddByte(lh, separatorByte) |
|||
} |
|||
d.dimHash = lh |
|||
|
|||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) |
|||
for n, v := range constLabels { |
|||
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ |
|||
Name: proto.String(n), |
|||
Value: proto.String(v), |
|||
}) |
|||
} |
|||
sort.Sort(LabelPairSorter(d.constLabelPairs)) |
|||
return d |
|||
} |
|||
|
|||
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
|
|||
// provided error set. If a collector returning such a descriptor is registered,
|
|||
// registration will fail with the provided error. NewInvalidDesc can be used by
|
|||
// a Collector to signal inability to describe itself.
|
|||
func NewInvalidDesc(err error) *Desc { |
|||
return &Desc{ |
|||
err: err, |
|||
} |
|||
} |
|||
|
|||
func (d *Desc) String() string { |
|||
lpStrings := make([]string, 0, len(d.constLabelPairs)) |
|||
for _, lp := range d.constLabelPairs { |
|||
lpStrings = append( |
|||
lpStrings, |
|||
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), |
|||
) |
|||
} |
|||
return fmt.Sprintf( |
|||
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", |
|||
d.fqName, |
|||
d.help, |
|||
strings.Join(lpStrings, ","), |
|||
d.variableLabels, |
|||
) |
|||
} |
|||
|
|||
func checkLabelName(l string) bool { |
|||
return labelNameRE.MatchString(l) && |
|||
!strings.HasPrefix(l, reservedLabelPrefix) |
|||
} |
|||
@ -0,0 +1,181 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Package prometheus provides metrics primitives to instrument code for
|
|||
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
|||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
|||
// Pushgateway (package push).
|
|||
//
|
|||
// All exported functions and methods are safe to be used concurrently unless
|
|||
// specified otherwise.
|
|||
//
|
|||
// A Basic Example
|
|||
//
|
|||
// As a starting point, a very basic usage example:
|
|||
//
|
|||
// package main
|
|||
//
|
|||
// import (
|
|||
// "net/http"
|
|||
//
|
|||
// "github.com/prometheus/client_golang/prometheus"
|
|||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
|||
// )
|
|||
//
|
|||
// var (
|
|||
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
|
|||
// Name: "cpu_temperature_celsius",
|
|||
// Help: "Current temperature of the CPU.",
|
|||
// })
|
|||
// hdFailures = prometheus.NewCounterVec(
|
|||
// prometheus.CounterOpts{
|
|||
// Name: "hd_errors_total",
|
|||
// Help: "Number of hard-disk errors.",
|
|||
// },
|
|||
// []string{"device"},
|
|||
// )
|
|||
// )
|
|||
//
|
|||
// func init() {
|
|||
// // Metrics have to be registered to be exposed:
|
|||
// prometheus.MustRegister(cpuTemp)
|
|||
// prometheus.MustRegister(hdFailures)
|
|||
// }
|
|||
//
|
|||
// func main() {
|
|||
// cpuTemp.Set(65.3)
|
|||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
|||
//
|
|||
// // The Handler function provides a default handler to expose metrics
|
|||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
|||
// http.Handle("/metrics", promhttp.Handler())
|
|||
// http.ListenAndServe(":8080", nil)
|
|||
// }
|
|||
//
|
|||
//
|
|||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
|||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
|||
//
|
|||
// Metrics
|
|||
//
|
|||
// The number of exported identifiers in this package might appear a bit
|
|||
// overwhelming. However, in addition to the basic plumbing shown in the example
|
|||
// above, you only need to understand the different metric types and their
|
|||
// vector versions for basic usage.
|
|||
//
|
|||
// Above, you have already touched the Counter and the Gauge. There are two more
|
|||
// advanced metric types: the Summary and Histogram. A more thorough description
|
|||
// of those four metric types can be found in the Prometheus docs:
|
|||
// https://prometheus.io/docs/concepts/metric_types/
|
|||
//
|
|||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
|||
// Prometheus server not to assume anything about its type.
|
|||
//
|
|||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
|||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
|||
// the partitioning of samples along dimensions called labels, which results in
|
|||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
|||
// HistogramVec, and UntypedVec.
|
|||
//
|
|||
// While only the fundamental metric types implement the Metric interface, both
|
|||
// the metrics and their vector versions implement the Collector interface. A
|
|||
// Collector manages the collection of a number of Metrics, but for convenience,
|
|||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
|||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
|||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
|||
//
|
|||
// To create instances of Metrics and their vector versions, you need a suitable
|
|||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
|
|||
// HistogramOpts, or UntypedOpts.
|
|||
//
|
|||
// Custom Collectors and constant Metrics
|
|||
//
|
|||
// While you could create your own implementations of Metric, most likely you
|
|||
// will only ever implement the Collector interface on your own. At a first
|
|||
// glance, a custom Collector seems handy to bundle Metrics for common
|
|||
// registration (with the prime example of the different metric vectors above,
|
|||
// which bundle all the metrics of the same name but with different labels).
|
|||
//
|
|||
// There is a more involved use case, too: If you already have metrics
|
|||
// available, created outside of the Prometheus context, you don't need the
|
|||
// interface of the various Metric types. You essentially want to mirror the
|
|||
// existing numbers into Prometheus Metrics during collection. An own
|
|||
// implementation of the Collector interface is perfect for that. You can create
|
|||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
|||
// NewConstSummary (and their respective Must… versions). That will happen in
|
|||
// the Collect method. The Describe method has to return separate Desc
|
|||
// instances, representative of the “throw-away” metrics to be created
|
|||
// later. NewDesc comes in handy to create those Desc instances.
|
|||
//
|
|||
// The Collector example illustrates the use case. You can also look at the
|
|||
// source code of the processCollector (mirroring process metrics), the
|
|||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
|||
// metrics) as examples that are used in this package itself.
|
|||
//
|
|||
// If you just need to call a function to get a single float value to collect as
|
|||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
|||
// shortcuts.
|
|||
//
|
|||
// Advanced Uses of the Registry
|
|||
//
|
|||
// While MustRegister is the by far most common way of registering a Collector,
|
|||
// sometimes you might want to handle the errors the registration might
|
|||
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
|||
// the Register function, the error is returned and can be handled.
|
|||
//
|
|||
// An error is returned if the registered Collector is incompatible or
|
|||
// inconsistent with already registered metrics. The registry aims for
|
|||
// consistency of the collected metrics according to the Prometheus data
|
|||
// model. Inconsistencies are ideally detected at registration time, not at
|
|||
// collect time. The former will usually be detected at start-up time of a
|
|||
// program, while the latter will only happen at scrape time, possibly not even
|
|||
// on the first scrape if the inconsistency only becomes relevant later. That is
|
|||
// the main reason why a Collector and a Metric have to describe themselves to
|
|||
// the registry.
|
|||
//
|
|||
// So far, everything we did operated on the so-called default registry, as it
|
|||
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
|||
// can create a custom registry, or you can even implement the Registerer or
|
|||
// Gatherer interfaces yourself. The methods Register and Unregister work in
|
|||
// the same way on a custom registry as the global functions Register and
|
|||
// Unregister on the default registry.
|
|||
//
|
|||
// There are a number of uses for custom registries: You can use registries
|
|||
// with special properties, see NewPedanticRegistry. You can avoid global state,
|
|||
// as it is imposed by the DefaultRegistry. You can use multiple registries at
|
|||
// the same time to expose different metrics in different ways. You can use
|
|||
// separate registries for testing purposes.
|
|||
//
|
|||
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
|||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
|||
// NewProcessCollector). With a custom registry, you are in control and decide
|
|||
// yourself about the Collectors to register.
|
|||
//
|
|||
// HTTP Exposition
|
|||
//
|
|||
// The Registry implements the Gatherer interface. The caller of the Gather
|
|||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
|||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
|||
// above. The tools to expose metrics via HTTP are in the promhttp
|
|||
// sub-package. (The top-level functions in the prometheus package are
|
|||
// deprecated.)
|
|||
//
|
|||
// Pushing to the Pushgateway
|
|||
//
|
|||
// Functions for pushing to the Pushgateway can be found in the push sub-package.
|
|||
//
|
|||
// Other Means of Exposition
|
|||
//
|
|||
// More ways of exposing metrics can easily be added. Sending metrics to
|
|||
// Graphite would be an example that will soon be implemented.
|
|||
package prometheus |
|||
@ -0,0 +1,118 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus_test |
|||
|
|||
import "github.com/prometheus/client_golang/prometheus" |
|||
|
|||
// ClusterManager is an example for a system that might have been built without
|
|||
// Prometheus in mind. It models a central manager of jobs running in a
|
|||
// cluster. To turn it into something that collects Prometheus metrics, we
|
|||
// simply add the two methods required for the Collector interface.
|
|||
//
|
|||
// An additional challenge is that multiple instances of the ClusterManager are
|
|||
// run within the same binary, each in charge of a different zone. We need to
|
|||
// make use of ConstLabels to be able to register each ClusterManager instance
|
|||
// with Prometheus.
|
|||
type ClusterManager struct { |
|||
Zone string |
|||
OOMCountDesc *prometheus.Desc |
|||
RAMUsageDesc *prometheus.Desc |
|||
// ... many more fields
|
|||
} |
|||
|
|||
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
|
|||
// real cluster manager would have to do. Since it may actually be really
|
|||
// expensive, it must only be called once per collection. This implementation,
|
|||
// obviously, only returns some made-up data.
|
|||
func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( |
|||
oomCountByHost map[string]int, ramUsageByHost map[string]float64, |
|||
) { |
|||
// Just example fake data.
|
|||
oomCountByHost = map[string]int{ |
|||
"foo.example.org": 42, |
|||
"bar.example.org": 2001, |
|||
} |
|||
ramUsageByHost = map[string]float64{ |
|||
"foo.example.org": 6.023e23, |
|||
"bar.example.org": 3.14, |
|||
} |
|||
return |
|||
} |
|||
|
|||
// Describe simply sends the two Descs in the struct to the channel.
|
|||
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { |
|||
ch <- c.OOMCountDesc |
|||
ch <- c.RAMUsageDesc |
|||
} |
|||
|
|||
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
|
|||
// creates constant metrics for each host on the fly based on the returned data.
|
|||
//
|
|||
// Note that Collect could be called concurrently, so we depend on
|
|||
// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
|
|||
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { |
|||
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() |
|||
for host, oomCount := range oomCountByHost { |
|||
ch <- prometheus.MustNewConstMetric( |
|||
c.OOMCountDesc, |
|||
prometheus.CounterValue, |
|||
float64(oomCount), |
|||
host, |
|||
) |
|||
} |
|||
for host, ramUsage := range ramUsageByHost { |
|||
ch <- prometheus.MustNewConstMetric( |
|||
c.RAMUsageDesc, |
|||
prometheus.GaugeValue, |
|||
ramUsage, |
|||
host, |
|||
) |
|||
} |
|||
} |
|||
|
|||
// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
|
|||
// that the zone is set as a ConstLabel. (It's different in each instance of the
|
|||
// ClusterManager, but constant over the lifetime of an instance.) Then there is
|
|||
// a variable label "host", since we want to partition the collected metrics by
|
|||
// host. Since all Descs created in this way are consistent across instances,
|
|||
// with a guaranteed distinction by the "zone" label, we can register different
|
|||
// ClusterManager instances with the same registry.
|
|||
func NewClusterManager(zone string) *ClusterManager { |
|||
return &ClusterManager{ |
|||
Zone: zone, |
|||
OOMCountDesc: prometheus.NewDesc( |
|||
"clustermanager_oom_crashes_total", |
|||
"Number of OOM crashes.", |
|||
[]string{"host"}, |
|||
prometheus.Labels{"zone": zone}, |
|||
), |
|||
RAMUsageDesc: prometheus.NewDesc( |
|||
"clustermanager_ram_usage_bytes", |
|||
"RAM usage as reported to the cluster manager.", |
|||
[]string{"host"}, |
|||
prometheus.Labels{"zone": zone}, |
|||
), |
|||
} |
|||
} |
|||
|
|||
func ExampleCollector() { |
|||
workerDB := NewClusterManager("db") |
|||
workerCA := NewClusterManager("ca") |
|||
|
|||
// Since we are dealing with custom Collector implementations, it might
|
|||
// be a good idea to try it out with a pedantic registry.
|
|||
reg := prometheus.NewPedanticRegistry() |
|||
reg.MustRegister(workerDB) |
|||
reg.MustRegister(workerCA) |
|||
} |
|||
@ -0,0 +1,752 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus_test |
|||
|
|||
import ( |
|||
"bytes" |
|||
"fmt" |
|||
"math" |
|||
"net/http" |
|||
"runtime" |
|||
"sort" |
|||
"strings" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
"github.com/prometheus/common/expfmt" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
func ExampleGauge() { |
|||
opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ |
|||
Namespace: "our_company", |
|||
Subsystem: "blob_storage", |
|||
Name: "ops_queued", |
|||
Help: "Number of blob storage operations waiting to be processed.", |
|||
}) |
|||
prometheus.MustRegister(opsQueued) |
|||
|
|||
// 10 operations queued by the goroutine managing incoming requests.
|
|||
opsQueued.Add(10) |
|||
// A worker goroutine has picked up a waiting operation.
|
|||
opsQueued.Dec() |
|||
// And once more...
|
|||
opsQueued.Dec() |
|||
} |
|||
|
|||
func ExampleGaugeVec() { |
|||
opsQueued := prometheus.NewGaugeVec( |
|||
prometheus.GaugeOpts{ |
|||
Namespace: "our_company", |
|||
Subsystem: "blob_storage", |
|||
Name: "ops_queued", |
|||
Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", |
|||
}, |
|||
[]string{ |
|||
// Which user has requested the operation?
|
|||
"user", |
|||
// Of what type is the operation?
|
|||
"type", |
|||
}, |
|||
) |
|||
prometheus.MustRegister(opsQueued) |
|||
|
|||
// Increase a value using compact (but order-sensitive!) WithLabelValues().
|
|||
opsQueued.WithLabelValues("bob", "put").Add(4) |
|||
// Increase a value with a map using WithLabels. More verbose, but order
|
|||
// doesn't matter anymore.
|
|||
opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() |
|||
} |
|||
|
|||
func ExampleGaugeFunc() { |
|||
if err := prometheus.Register(prometheus.NewGaugeFunc( |
|||
prometheus.GaugeOpts{ |
|||
Subsystem: "runtime", |
|||
Name: "goroutines_count", |
|||
Help: "Number of goroutines that currently exist.", |
|||
}, |
|||
func() float64 { return float64(runtime.NumGoroutine()) }, |
|||
)); err == nil { |
|||
fmt.Println("GaugeFunc 'goroutines_count' registered.") |
|||
} |
|||
// Note that the count of goroutines is a gauge (and not a counter) as
|
|||
// it can go up and down.
|
|||
|
|||
// Output:
|
|||
// GaugeFunc 'goroutines_count' registered.
|
|||
} |
|||
|
|||
func ExampleCounter() { |
|||
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ |
|||
Name: "repository_pushes", // Note: No help string...
|
|||
}) |
|||
err := prometheus.Register(pushCounter) // ... so this will return an error.
|
|||
if err != nil { |
|||
fmt.Println("Push counter couldn't be registered, no counting will happen:", err) |
|||
return |
|||
} |
|||
|
|||
// Try it once more, this time with a help string.
|
|||
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ |
|||
Name: "repository_pushes", |
|||
Help: "Number of pushes to external repository.", |
|||
}) |
|||
err = prometheus.Register(pushCounter) |
|||
if err != nil { |
|||
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) |
|||
return |
|||
} |
|||
|
|||
pushComplete := make(chan struct{}) |
|||
// TODO: Start a goroutine that performs repository pushes and reports
|
|||
// each completion via the channel.
|
|||
for range pushComplete { |
|||
pushCounter.Inc() |
|||
} |
|||
// Output:
|
|||
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
|
|||
} |
|||
|
|||
func ExampleCounterVec() { |
|||
httpReqs := prometheus.NewCounterVec( |
|||
prometheus.CounterOpts{ |
|||
Name: "http_requests_total", |
|||
Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", |
|||
}, |
|||
[]string{"code", "method"}, |
|||
) |
|||
prometheus.MustRegister(httpReqs) |
|||
|
|||
httpReqs.WithLabelValues("404", "POST").Add(42) |
|||
|
|||
// If you have to access the same set of labels very frequently, it
|
|||
// might be good to retrieve the metric only once and keep a handle to
|
|||
// it. But beware of deletion of that metric, see below!
|
|||
m := httpReqs.WithLabelValues("200", "GET") |
|||
for i := 0; i < 1000000; i++ { |
|||
m.Inc() |
|||
} |
|||
// Delete a metric from the vector. If you have previously kept a handle
|
|||
// to that metric (as above), future updates via that handle will go
|
|||
// unseen (even if you re-create a metric with the same label set
|
|||
// later).
|
|||
httpReqs.DeleteLabelValues("200", "GET") |
|||
// Same thing with the more verbose Labels syntax.
|
|||
httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) |
|||
} |
|||
|
|||
func ExampleInstrumentHandler() { |
|||
// Handle the "/doc" endpoint with the standard http.FileServer handler.
|
|||
// By wrapping the handler with InstrumentHandler, request count,
|
|||
// request and response sizes, and request latency are automatically
|
|||
// exported to Prometheus, partitioned by HTTP status code and method
|
|||
// and by the handler name (here "fileserver").
|
|||
http.Handle("/doc", prometheus.InstrumentHandler( |
|||
"fileserver", http.FileServer(http.Dir("/usr/share/doc")), |
|||
)) |
|||
// The Prometheus handler still has to be registered to handle the
|
|||
// "/metrics" endpoint. The handler returned by prometheus.Handler() is
|
|||
// already instrumented - with "prometheus" as the handler name. In this
|
|||
// example, we want the handler name to be "metrics", so we instrument
|
|||
// the uninstrumented Prometheus handler ourselves.
|
|||
http.Handle("/metrics", prometheus.InstrumentHandler( |
|||
"metrics", prometheus.UninstrumentedHandler(), |
|||
)) |
|||
} |
|||
|
|||
func ExampleLabelPairSorter() { |
|||
labelPairs := []*dto.LabelPair{ |
|||
{Name: proto.String("status"), Value: proto.String("404")}, |
|||
{Name: proto.String("method"), Value: proto.String("get")}, |
|||
} |
|||
|
|||
sort.Sort(prometheus.LabelPairSorter(labelPairs)) |
|||
|
|||
fmt.Println(labelPairs) |
|||
// Output:
|
|||
// [name:"method" value:"get" name:"status" value:"404" ]
|
|||
} |
|||
|
|||
// ExampleRegister walks through registration against the default registry:
// a successful Register, a name collision with a vector, unregistration (and
// why it does not resolve the collision), and the ConstLabels alternative.
// The exact sequence of printed lines is pinned by the Output block below.
func ExampleRegister() {
	// Imagine you have a worker pool and want to count the tasks completed.
	taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Subsystem: "worker_pool",
		Name:      "completed_tasks_total",
		Help:      "Total number of tasks completed.",
	})
	// This will register fine.
	if err := prometheus.Register(taskCounter); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println("taskCounter registered.")
	}
	// Don't forget to tell the HTTP server about the Prometheus handler.
	// (In a real program, you still need to start the HTTP server...)
	http.Handle("/metrics", prometheus.Handler())

	// Now you can start workers and give every one of them a pointer to
	// taskCounter and let it increment it whenever it completes a task.
	taskCounter.Inc() // This has to happen somewhere in the worker code.

	// But wait, you want to see how individual workers perform. So you need
	// a vector of counters, with one element for each worker.
	taskCounterVec := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: "worker_pool",
			Name:      "completed_tasks_total",
			Help:      "Total number of tasks completed.",
		},
		[]string{"worker_id"},
	)

	// Registering will fail because we already have a metric of that name.
	if err := prometheus.Register(taskCounterVec); err != nil {
		fmt.Println("taskCounterVec not registered:", err)
	} else {
		fmt.Println("taskCounterVec registered.")
	}

	// To fix, first unregister the old taskCounter.
	if prometheus.Unregister(taskCounter) {
		fmt.Println("taskCounter unregistered.")
	}

	// Try registering taskCounterVec again.
	if err := prometheus.Register(taskCounterVec); err != nil {
		fmt.Println("taskCounterVec not registered:", err)
	} else {
		fmt.Println("taskCounterVec registered.")
	}
	// Bummer! Still doesn't work.

	// Prometheus will not allow you to ever export metrics with
	// inconsistent help strings or label names. After unregistering, the
	// unregistered metrics will cease to show up in the /metrics HTTP
	// response, but the registry still remembers that those metrics had
	// been exported before. For this example, we will now choose a
	// different name. (In a real program, you would obviously not export
	// the obsolete metric in the first place.)
	taskCounterVec = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: "worker_pool",
			Name:      "completed_tasks_by_id",
			Help:      "Total number of tasks completed.",
		},
		[]string{"worker_id"},
	)
	if err := prometheus.Register(taskCounterVec); err != nil {
		fmt.Println("taskCounterVec not registered:", err)
	} else {
		fmt.Println("taskCounterVec registered.")
	}
	// Finally it worked!

	// The workers have to tell taskCounterVec their id to increment the
	// right element in the metric vector.
	taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.

	// Each worker could also keep a reference to their own counter element
	// around. Pick the counter at initialization time of the worker.
	myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
	myCounter.Inc()                                   // Somewhere in the code of that worker.

	// Note that something like WithLabelValues("42", "spurious arg") would
	// panic (because you have provided too many label values). If you want
	// to get an error instead, use GetMetricWithLabelValues(...) instead.
	notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
	if err != nil {
		fmt.Println("Worker initialization failed:", err)
	}
	if notMyCounter == nil {
		fmt.Println("notMyCounter is nil.")
	}

	// A different (and somewhat tricky) approach is to use
	// ConstLabels. ConstLabels are pairs of label names and label values
	// that never change. You might ask what those labels are good for (and
	// rightfully so - if they never change, they could as well be part of
	// the metric name). There are essentially two use-cases: The first is
	// if labels are constant throughout the lifetime of a binary execution,
	// but they vary over time or between different instances of a running
	// binary. The second is what we have here: Each worker creates and
	// registers an own Counter instance where the only difference is in the
	// value of the ConstLabels. Those Counters can all be registered
	// because the different ConstLabel values guarantee that each worker
	// will increment a different Counter metric.
	counterOpts := prometheus.CounterOpts{
		Subsystem:   "worker_pool",
		Name:        "completed_tasks",
		Help:        "Total number of tasks completed.",
		ConstLabels: prometheus.Labels{"worker_id": "42"},
	}
	taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
	if err := prometheus.Register(taskCounterForWorker42); err != nil {
		fmt.Println("taskCounterVForWorker42 not registered:", err)
	} else {
		fmt.Println("taskCounterForWorker42 registered.")
	}
	// Obviously, in real code, taskCounterForWorker42 would be a member
	// variable of a worker struct, and the "42" would be retrieved with a
	// GetId() method or something. The Counter would be created and
	// registered in the initialization code of the worker.

	// For the creation of the next Counter, we can recycle
	// counterOpts. Just change the ConstLabels.
	counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
	taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
	if err := prometheus.Register(taskCounterForWorker2001); err != nil {
		fmt.Println("taskCounterVForWorker2001 not registered:", err)
	} else {
		fmt.Println("taskCounterForWorker2001 registered.")
	}

	taskCounterForWorker2001.Inc()
	taskCounterForWorker42.Inc()
	taskCounterForWorker2001.Inc()

	// Yet another approach would be to turn the workers themselves into
	// Collectors and register them. See the Collector example for details.

	// Output:
	// taskCounter registered.
	// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
	// taskCounter unregistered.
	// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
	// taskCounterVec registered.
	// Worker initialization failed: inconsistent label cardinality
	// notMyCounter is nil.
	// taskCounterForWorker42 registered.
	// taskCounterForWorker2001 registered.
}
|||
|
|||
// ExampleSummary feeds a Summary with a deterministic series of observations
// and prints its protobuf state. The Output block pins sample count, sum, and
// the default quantiles (0.5, 0.9, 0.99).
func ExampleSummary() {
	temps := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "pond_temperature_celsius",
		Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
	})

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
	}

	// Just for demonstration, let's check the state of the summary by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	temps.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// summary: <
	//   sample_count: 1000
	//   sample_sum: 29969.50000000001
	//   quantile: <
	//     quantile: 0.5
	//     value: 31.1
	//   >
	//   quantile: <
	//     quantile: 0.9
	//     value: 41.3
	//   >
	//   quantile: <
	//     quantile: 0.99
	//     value: 41.9
	//   >
	// >
}
|||
|
|||
// ExampleSummaryVec shows a Summary partitioned by one label ("species"),
// including a label combination with zero observations, inspected by
// gathering from a private registry.
func ExampleSummaryVec() {
	temps := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "pond_temperature_celsius",
			Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
		},
		[]string{"species"},
	)

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
		temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
	}

	// Create a Summary without any observations.
	temps.WithLabelValues("leiopelma-hochstetteri")

	// Just for demonstration, let's check the state of the summary vector
	// by registering it with a custom registry and then let it collect the
	// metrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(temps)

	metricFamilies, err := reg.Gather()
	if err != nil || len(metricFamilies) != 1 {
		panic("unexpected behavior of custom test registry")
	}
	fmt.Println(proto.MarshalTextString(metricFamilies[0]))

	// Output:
	// name: "pond_temperature_celsius"
	// help: "The temperature of the frog pond."
	// type: SUMMARY
	// metric: <
	//   label: <
	//     name: "species"
	//     value: "leiopelma-hochstetteri"
	//   >
	//   summary: <
	//     sample_count: 0
	//     sample_sum: 0
	//     quantile: <
	//       quantile: 0.5
	//       value: nan
	//     >
	//     quantile: <
	//       quantile: 0.9
	//       value: nan
	//     >
	//     quantile: <
	//       quantile: 0.99
	//       value: nan
	//     >
	//   >
	// >
	// metric: <
	//   label: <
	//     name: "species"
	//     value: "lithobates-catesbeianus"
	//   >
	//   summary: <
	//     sample_count: 1000
	//     sample_sum: 31956.100000000017
	//     quantile: <
	//       quantile: 0.5
	//       value: 32.4
	//     >
	//     quantile: <
	//       quantile: 0.9
	//       value: 41.4
	//     >
	//     quantile: <
	//       quantile: 0.99
	//       value: 41.9
	//     >
	//   >
	// >
	// metric: <
	//   label: <
	//     name: "species"
	//     value: "litoria-caerulea"
	//   >
	//   summary: <
	//     sample_count: 1000
	//     sample_sum: 29969.50000000001
	//     quantile: <
	//       quantile: 0.5
	//       value: 31.1
	//     >
	//     quantile: <
	//       quantile: 0.9
	//       value: 41.3
	//     >
	//     quantile: <
	//       quantile: 0.99
	//       value: 41.9
	//     >
	//   >
	// >
}
|||
|
|||
// ExampleNewConstSummary shows how to build a constant Summary from
// externally obtained values (count, sum, and fixed quantiles).
func ExampleNewConstSummary() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"A summary of the HTTP request durations.",
		[]string{"code", "method"},
		prometheus.Labels{"owner": "example"},
	)

	// Create a constant summary from values we got from a 3rd party telemetry system.
	s := prometheus.MustNewConstSummary(
		desc,
		4711, 403.34,
		map[float64]float64{0.5: 42.3, 0.9: 323.3},
		"200", "get",
	)

	// Just for demonstration, let's check the state of the summary by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	s.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// label: <
	//   name: "code"
	//   value: "200"
	// >
	// label: <
	//   name: "method"
	//   value: "get"
	// >
	// label: <
	//   name: "owner"
	//   value: "example"
	// >
	// summary: <
	//   sample_count: 4711
	//   sample_sum: 403.34
	//   quantile: <
	//     quantile: 0.5
	//     value: 42.3
	//   >
	//   quantile: <
	//     quantile: 0.9
	//     value: 323.3
	//   >
	// >
}
|||
|
|||
// ExampleHistogram shows a Histogram with linear buckets observing the
// same simulated temperatures as ExampleSummary.
func ExampleHistogram() {
	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "pond_temperature_celsius",
		Help:    "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
		Buckets: prometheus.LinearBuckets(20, 5, 5),  // 5 buckets, each 5 centigrade wide.
	})

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
	}

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	temps.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// histogram: <
	//   sample_count: 1000
	//   sample_sum: 29969.50000000001
	//   bucket: <
	//     cumulative_count: 192
	//     upper_bound: 20
	//   >
	//   bucket: <
	//     cumulative_count: 366
	//     upper_bound: 25
	//   >
	//   bucket: <
	//     cumulative_count: 501
	//     upper_bound: 30
	//   >
	//   bucket: <
	//     cumulative_count: 638
	//     upper_bound: 35
	//   >
	//   bucket: <
	//     cumulative_count: 816
	//     upper_bound: 40
	//   >
	// >
}
|||
|
|||
// ExampleNewConstHistogram shows how to build a constant Histogram from
// externally obtained values (count, sum, and cumulative bucket counts).
func ExampleNewConstHistogram() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"A histogram of the HTTP request durations.",
		[]string{"code", "method"},
		prometheus.Labels{"owner": "example"},
	)

	// Create a constant histogram from values we got from a 3rd party telemetry system.
	h := prometheus.MustNewConstHistogram(
		desc,
		4711, 403.34,
		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
		"200", "get",
	)

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	h.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// label: <
	//   name: "code"
	//   value: "200"
	// >
	// label: <
	//   name: "method"
	//   value: "get"
	// >
	// label: <
	//   name: "owner"
	//   value: "example"
	// >
	// histogram: <
	//   sample_count: 4711
	//   sample_sum: 403.34
	//   bucket: <
	//     cumulative_count: 121
	//     upper_bound: 25
	//   >
	//   bucket: <
	//     cumulative_count: 2403
	//     upper_bound: 50
	//   >
	//   bucket: <
	//     cumulative_count: 3221
	//     upper_bound: 100
	//   >
	//   bucket: <
	//     cumulative_count: 4233
	//     upper_bound: 200
	//   >
	// >
}
|||
|
|||
func ExampleAlreadyRegisteredError() { |
|||
reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ |
|||
Name: "requests_total", |
|||
Help: "The total number of requests served.", |
|||
}) |
|||
if err := prometheus.Register(reqCounter); err != nil { |
|||
if are, ok := err.(prometheus.AlreadyRegisteredError); ok { |
|||
// A counter for that metric has been registered before.
|
|||
// Use the old counter from now on.
|
|||
reqCounter = are.ExistingCollector.(prometheus.Counter) |
|||
} else { |
|||
// Something else went wrong!
|
|||
panic(err) |
|||
} |
|||
} |
|||
reqCounter.Inc() |
|||
} |
|||
|
|||
// ExampleGatherers demonstrates merging metrics from multiple sources (a
// regular registry plus a text-format parser) via prometheus.Gatherers,
// including how duplicate and inconsistent metrics are reported.
func ExampleGatherers() {
	reg := prometheus.NewRegistry()
	temp := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "temperature_kelvin",
			Help: "Temperature in Kelvin.",
		},
		[]string{"location"},
	)
	reg.MustRegister(temp)
	temp.WithLabelValues("outside").Set(273.14)
	temp.WithLabelValues("inside").Set(298.44)

	var parser expfmt.TextParser

	text := `
# TYPE humidity_percent gauge
# HELP humidity_percent Humidity in %.
humidity_percent{location="outside"} 45.4
humidity_percent{location="inside"} 33.2
# TYPE temperature_kelvin gauge
# HELP temperature_kelvin Temperature in Kelvin.
temperature_kelvin{location="somewhere else"} 4.5
`

	// parseText adapts the text parser to the Gatherer interface by
	// flattening the parsed map into a slice of metric families.
	parseText := func() ([]*dto.MetricFamily, error) {
		parsed, err := parser.TextToMetricFamilies(strings.NewReader(text))
		if err != nil {
			return nil, err
		}
		var result []*dto.MetricFamily
		for _, mf := range parsed {
			result = append(result, mf)
		}
		return result, nil
	}

	gatherers := prometheus.Gatherers{
		reg,
		prometheus.GathererFunc(parseText),
	}

	gathering, err := gatherers.Gather()
	if err != nil {
		fmt.Println(err)
	}

	out := &bytes.Buffer{}
	for _, mf := range gathering {
		if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
			panic(err)
		}
	}
	fmt.Print(out.String())
	fmt.Println("----------")

	// Note how the temperature_kelvin metric family has been merged from
	// different sources. Now try
	text = `
# TYPE humidity_percent gauge
# HELP humidity_percent Humidity in %.
humidity_percent{location="outside"} 45.4
humidity_percent{location="inside"} 33.2
# TYPE temperature_kelvin gauge
# HELP temperature_kelvin Temperature in Kelvin.
# Duplicate metric:
temperature_kelvin{location="outside"} 265.3
# Wrong labels:
temperature_kelvin 4.5
`

	gathering, err = gatherers.Gather()
	if err != nil {
		fmt.Println(err)
	}
	// Note that still as many metrics as possible are returned:
	out.Reset()
	for _, mf := range gathering {
		if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
			panic(err)
		}
	}
	fmt.Print(out.String())

	// Output:
	// # HELP humidity_percent Humidity in %.
	// # TYPE humidity_percent gauge
	// humidity_percent{location="inside"} 33.2
	// humidity_percent{location="outside"} 45.4
	// # HELP temperature_kelvin Temperature in Kelvin.
	// # TYPE temperature_kelvin gauge
	// temperature_kelvin{location="inside"} 298.44
	// temperature_kelvin{location="outside"} 273.14
	// temperature_kelvin{location="somewhere else"} 4.5
	// ----------
	// 2 error(s) occurred:
	// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
	// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
	// # HELP humidity_percent Humidity in %.
	// # TYPE humidity_percent gauge
	// humidity_percent{location="inside"} 33.2
	// humidity_percent{location="outside"} 45.4
	// # HELP temperature_kelvin Temperature in Kelvin.
	// # TYPE temperature_kelvin gauge
	// temperature_kelvin{location="inside"} 298.44
	// temperature_kelvin{location="outside"} 273.14
}
|||
@ -0,0 +1,119 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"encoding/json" |
|||
"expvar" |
|||
) |
|||
|
|||
// expvarCollector exports expvar variables as Untyped Prometheus metrics.
// The exports map links each expvar key to the Desc used for the
// corresponding metric. See NewExpvarCollector for the full contract.
type expvarCollector struct {
	exports map[string]*Desc
}
|||
|
|||
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
|||
// to be registered with a Prometheus registry.
|
|||
//
|
|||
// An expvar Collector collects metrics from the expvar interface. It provides a
|
|||
// quick way to expose numeric values that are already exported via expvar as
|
|||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
|||
// fundamentally different, and that the expvar Collector is inherently slower
|
|||
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
|||
// for experiments and prototying, but you should seriously consider a more
|
|||
// direct implementation of Prometheus metrics for monitoring production
|
|||
// systems.
|
|||
//
|
|||
// The exports map has the following meaning:
|
|||
//
|
|||
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
|
|||
// want to export as Prometheus metric, you need an entry in the exports
|
|||
// map. The descriptor mapped to each key describes how to export the expvar
|
|||
// value. It defines the name and the help string of the Prometheus metric
|
|||
// proxying the expvar value. The type will always be Untyped.
|
|||
//
|
|||
// For descriptors without variable labels, the expvar value must be a number or
|
|||
// a bool. The number is then directly exported as the Prometheus sample
|
|||
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
|
|||
// that are not numbers or bools are silently ignored.
|
|||
//
|
|||
// If the descriptor has one variable label, the expvar value must be an expvar
|
|||
// map. The keys in the expvar map become the various values of the one
|
|||
// Prometheus label. The values in the expvar map must be numbers or bools again
|
|||
// as above.
|
|||
//
|
|||
// For descriptors with more than one variable label, the expvar must be a
|
|||
// nested expvar map, i.e. where the values of the topmost map are maps again
|
|||
// etc. until a depth is reached that corresponds to the number of labels. The
|
|||
// leaves of that structure must be numbers or bools as above to serve as the
|
|||
// sample values.
|
|||
//
|
|||
// Anything that does not fit into the scheme above is silently ignored.
|
|||
func NewExpvarCollector(exports map[string]*Desc) Collector { |
|||
return &expvarCollector{ |
|||
exports: exports, |
|||
} |
|||
} |
|||
|
|||
// Describe implements Collector.
|
|||
func (e *expvarCollector) Describe(ch chan<- *Desc) { |
|||
for _, desc := range e.exports { |
|||
ch <- desc |
|||
} |
|||
} |
|||
|
|||
// Collect implements Collector. For each exported expvar key, it fetches
// the current expvar value, decodes its JSON string form, and walks the
// (possibly nested) structure to emit one Untyped metric per leaf. The
// nesting depth must match the number of variable labels of the Desc;
// values that are not numbers, bools, or maps are silently skipped.
func (e *expvarCollector) Collect(ch chan<- Metric) {
	for name, desc := range e.exports {
		var m Metric
		expVar := expvar.Get(name)
		if expVar == nil {
			// No expvar variable registered under this key; skip it.
			continue
		}
		var v interface{}
		// labels is reused across the recursive walk; slot i holds the
		// label value for nesting level i.
		labels := make([]string, len(desc.variableLabels))
		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
			ch <- NewInvalidMetric(desc, err)
			continue
		}
		// processValue recursively descends nested maps, filling labels
		// level by level until enough label values are collected to
		// emit a metric for the leaf value.
		var processValue func(v interface{}, i int)
		processValue = func(v interface{}, i int) {
			if i >= len(labels) {
				// All labels collected; copy them because labels is
				// mutated by sibling branches of the recursion.
				copiedLabels := append(make([]string, 0, len(labels)), labels...)
				switch v := v.(type) {
				case float64:
					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
				case bool:
					// Bools map to 1 (true) / 0 (false).
					if v {
						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
					} else {
						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
					}
				default:
					// Leaf is neither number nor bool: silently ignore.
					return
				}
				ch <- m
				return
			}
			vm, ok := v.(map[string]interface{})
			if !ok {
				// Expected a nested map at this level but found a
				// scalar: silently ignore.
				return
			}
			for lv, val := range vm {
				labels[i] = lv
				processValue(val, i+1)
			}
		}
		processValue(v, 0)
	}
}
|||
@ -0,0 +1,97 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus_test |
|||
|
|||
import ( |
|||
"expvar" |
|||
"fmt" |
|||
"sort" |
|||
"strings" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
func ExampleNewExpvarCollector() { |
|||
expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ |
|||
"memstats": prometheus.NewDesc( |
|||
"expvar_memstats", |
|||
"All numeric memstats as one metric family. Not a good role-model, actually... ;-)", |
|||
[]string{"type"}, nil, |
|||
), |
|||
"lone-int": prometheus.NewDesc( |
|||
"expvar_lone_int", |
|||
"Just an expvar int as an example.", |
|||
nil, nil, |
|||
), |
|||
"http-request-map": prometheus.NewDesc( |
|||
"expvar_http_request_total", |
|||
"How many http requests processed, partitioned by status code and http method.", |
|||
[]string{"code", "method"}, nil, |
|||
), |
|||
}) |
|||
prometheus.MustRegister(expvarCollector) |
|||
|
|||
// The Prometheus part is done here. But to show that this example is
|
|||
// doing anything, we have to manually export something via expvar. In
|
|||
// real-life use-cases, some library would already have exported via
|
|||
// expvar what we want to re-export as Prometheus metrics.
|
|||
expvar.NewInt("lone-int").Set(42) |
|||
expvarMap := expvar.NewMap("http-request-map") |
|||
var ( |
|||
expvarMap1, expvarMap2 expvar.Map |
|||
expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int |
|||
) |
|||
expvarMap1.Init() |
|||
expvarMap2.Init() |
|||
expvarInt11.Set(3) |
|||
expvarInt12.Set(13) |
|||
expvarInt21.Set(11) |
|||
expvarInt22.Set(212) |
|||
expvarMap1.Set("POST", &expvarInt11) |
|||
expvarMap1.Set("GET", &expvarInt12) |
|||
expvarMap2.Set("POST", &expvarInt21) |
|||
expvarMap2.Set("GET", &expvarInt22) |
|||
expvarMap.Set("404", &expvarMap1) |
|||
expvarMap.Set("200", &expvarMap2) |
|||
// Results in the following expvar map:
|
|||
// "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
|
|||
|
|||
// Let's see what the scrape would yield, but exclude the memstats metrics.
|
|||
metricStrings := []string{} |
|||
metric := dto.Metric{} |
|||
metricChan := make(chan prometheus.Metric) |
|||
go func() { |
|||
expvarCollector.Collect(metricChan) |
|||
close(metricChan) |
|||
}() |
|||
for m := range metricChan { |
|||
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { |
|||
metric.Reset() |
|||
m.Write(&metric) |
|||
metricStrings = append(metricStrings, metric.String()) |
|||
} |
|||
} |
|||
sort.Strings(metricStrings) |
|||
for _, s := range metricStrings { |
|||
fmt.Println(strings.TrimRight(s, " ")) |
|||
} |
|||
// Output:
|
|||
// label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
|
|||
// label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
|
|||
// label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
|
|||
// label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
|
|||
// untyped:<value:42 >
|
|||
} |
|||
@ -0,0 +1,29 @@ |
|||
package prometheus |
|||
|
|||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
|||
|
|||
const ( |
|||
offset64 = 14695981039346656037 |
|||
prime64 = 1099511628211 |
|||
) |
|||
|
|||
// hashNew initializies a new fnv64a hash value.
|
|||
func hashNew() uint64 { |
|||
return offset64 |
|||
} |
|||
|
|||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
|||
func hashAdd(h uint64, s string) uint64 { |
|||
for i := 0; i < len(s); i++ { |
|||
h ^= uint64(s[i]) |
|||
h *= prime64 |
|||
} |
|||
return h |
|||
} |
|||
|
|||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
|||
func hashAddByte(h uint64, b byte) uint64 { |
|||
h ^= uint64(b) |
|||
h *= prime64 |
|||
return h |
|||
} |
|||
@ -0,0 +1,140 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// A Gauge is typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
//
// To create Gauge instances, use NewGauge.
type Gauge interface {
	Metric
	Collector

	// Set sets the Gauge to an arbitrary value.
	Set(float64)
	// Inc increments the Gauge by 1.
	Inc()
	// Dec decrements the Gauge by 1.
	Dec()
	// Add adds the given value to the Gauge. (The value can be
	// negative, resulting in a decrease of the Gauge.)
	Add(float64)
	// Sub subtracts the given value from the Gauge. (The value can be
	// negative, resulting in an increase of the Gauge.)
	Sub(float64)
}
|||
|
|||
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
|||
|
|||
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
|||
func NewGauge(opts GaugeOpts) Gauge { |
|||
return newValue(NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
nil, |
|||
opts.ConstLabels, |
|||
), GaugeValue, 0) |
|||
} |
|||
|
|||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels. This is used if
// you want to count the same thing partitioned by various dimensions
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
	*MetricVec
}
|||
|
|||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
|||
// partitioned by the given label names. At least one label name must be
|
|||
// provided.
|
|||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { |
|||
desc := NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
labelNames, |
|||
opts.ConstLabels, |
|||
) |
|||
return &GaugeVec{ |
|||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { |
|||
return newValue(desc, GaugeValue, 0, lvs...) |
|||
}), |
|||
} |
|||
} |
|||
|
|||
// GetMetricWithLabelValues replaces the method of the same name in
|
|||
// MetricVec. The difference is that this method returns a Gauge and not a
|
|||
// Metric so that no type conversion is required.
|
|||
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { |
|||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) |
|||
if metric != nil { |
|||
return metric.(Gauge), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|||
// difference is that this method returns a Gauge and not a Metric so that no
|
|||
// type conversion is required.
|
|||
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { |
|||
metric, err := m.MetricVec.GetMetricWith(labels) |
|||
if metric != nil { |
|||
return metric.(Gauge), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
|||
// GetMetricWithLabelValues would have returned an error. By not returning an
|
|||
// error, WithLabelValues allows shortcuts like
|
|||
// myVec.WithLabelValues("404", "GET").Add(42)
|
|||
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { |
|||
return m.MetricVec.WithLabelValues(lvs...).(Gauge) |
|||
} |
|||
|
|||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
|||
// returned an error. By not returning an error, With allows shortcuts like
|
|||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
|||
func (m *GaugeVec) With(labels Labels) Gauge { |
|||
return m.MetricVec.With(labels).(Gauge) |
|||
} |
|||
|
|||
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// provided function.
//
// To create GaugeFunc instances, use NewGaugeFunc.
type GaugeFunc interface {
	Metric
	Collector
}
|||
|
|||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
|||
// value reported is determined by calling the given function from within the
|
|||
// Write method. Take into account that metric collection may happen
|
|||
// concurrently. If that results in concurrent calls to Write, like in the case
|
|||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
|||
// function must be concurrency-safe.
|
|||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { |
|||
return newValueFunc(NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
nil, |
|||
opts.ConstLabels, |
|||
), GaugeValue, function) |
|||
} |
|||
@ -0,0 +1,182 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"math" |
|||
"math/rand" |
|||
"sync" |
|||
"testing" |
|||
"testing/quick" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// listenGaugeStream accumulates every float64 received on vals until done
// is closed, then drains any values still buffered in vals, sends the
// total on result, and closes result. The senders must have stopped
// sending before done is closed, because the listener closes vals.
func listenGaugeStream(vals, result chan float64, done chan struct{}) {
	var total float64
	for {
		select {
		case v := <-vals:
			total += v
			continue
		case <-done:
		}
		break
	}
	// done is closed: no more senders are active, so we may close vals
	// and sum up whatever is still buffered.
	close(vals)
	for v := range vals {
		total += v
	}
	result <- total
	close(result)
}
|||
|
|||
// TestGaugeConcurrency uses quick.Check to verify that concurrent Add
// calls on a single Gauge accumulate correctly: every value added to the
// gauge is also sent on a channel, and the independently computed channel
// sum must approximately equal the gauge's final value.
func TestGaugeConcurrency(t *testing.T) {
	it := func(n uint32) bool {
		// Derive workload size and number of goroutines from the random
		// input so quick.Check explores different shapes.
		mutations := int(n % 10000)
		concLevel := int(n%15 + 1)

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sStream := make(chan float64, mutations*concLevel)
		result := make(chan float64)
		done := make(chan struct{})

		go listenGaugeStream(sStream, result, done)
		go func() {
			// Signal the listener once all mutator goroutines finished.
			end.Wait()
			close(done)
		}()

		gge := NewGauge(GaugeOpts{
			Name: "test_gauge",
			Help: "no help can be found here",
		})
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			for j := 0; j < mutations; j++ {
				vals[j] = rand.Float64() - 0.5
			}

			go func(vals []float64) {
				// start gates all goroutines so they mutate concurrently.
				start.Wait()
				for _, v := range vals {
					sStream <- v
					gge.Add(v)
				}
				end.Done()
			}(vals)
		}
		start.Done()

		// Compare with a small tolerance: float addition order differs
		// between the channel sum and the gauge.
		if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
			t.Fatalf("expected approx. %f, got %f", expected, got)
			return false
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Fatal(err)
	}
}
|||
|
|||
// TestGaugeVecConcurrency is the GaugeVec analogue of
// TestGaugeConcurrency: values are randomly distributed over several
// label values ("A", "B", ...), with one accumulator channel per label
// value, and each child gauge must approximately match its channel sum.
func TestGaugeVecConcurrency(t *testing.T) {
	it := func(n uint32) bool {
		mutations := int(n % 10000)
		concLevel := int(n%15 + 1)
		vecLength := int(n%5 + 1)

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sStreams := make([]chan float64, vecLength)
		results := make([]chan float64, vecLength)
		done := make(chan struct{})

		// One accumulator stream per label value.
		for i := 0; i < vecLength; i++ {
			sStreams[i] = make(chan float64, mutations*concLevel)
			results[i] = make(chan float64)
			go listenGaugeStream(sStreams[i], results[i], done)
		}

		go func() {
			end.Wait()
			close(done)
		}()

		gge := NewGaugeVec(
			GaugeOpts{
				Name: "test_gauge",
				Help: "no help can be found here",
			},
			[]string{"label"},
		)
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			// pick[j] chooses which label value receives vals[j].
			pick := make([]int, mutations)
			for j := 0; j < mutations; j++ {
				vals[j] = rand.Float64() - 0.5
				pick[j] = rand.Intn(vecLength)
			}

			go func(vals []float64) {
				start.Wait()
				for i, v := range vals {
					sStreams[pick[i]] <- v
					gge.WithLabelValues(string('A' + pick[i])).Add(v)
				}
				end.Done()
			}(vals)
		}
		start.Done()

		for i := range sStreams {
			// Small tolerance for float addition-order differences.
			if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
				t.Fatalf("expected approx. %f, got %f", expected, got)
				return false
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Fatal(err)
	}
}
|||
|
|||
func TestGaugeFunc(t *testing.T) { |
|||
gf := NewGaugeFunc( |
|||
GaugeOpts{ |
|||
Name: "test_name", |
|||
Help: "test help", |
|||
ConstLabels: Labels{"a": "1", "b": "2"}, |
|||
}, |
|||
func() float64 { return 3.1415 }, |
|||
) |
|||
|
|||
if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { |
|||
t.Errorf("expected %q, got %q", expected, got) |
|||
} |
|||
|
|||
m := &dto.Metric{} |
|||
gf.Write(m) |
|||
|
|||
if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got { |
|||
t.Errorf("expected %q, got %q", expected, got) |
|||
} |
|||
} |
|||
@ -0,0 +1,263 @@ |
|||
package prometheus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"runtime" |
|||
"runtime/debug" |
|||
"time" |
|||
) |
|||
|
|||
// goCollector collects metrics about the Go runtime of the current
// process: the number of live goroutines, a summary of GC pause
// durations, and a fixed set of runtime.MemStats readings.
type goCollector struct {
	goroutines Gauge
	gcDesc     *Desc

	// metrics to describe and collect; one entry per exported
	// runtime.MemStats field, see NewGoCollector.
	metrics memStatsMetrics
}
|||
|
|||
// NewGoCollector returns a collector which exports metrics about the current
// go process. Each memstats entry pairs a metric description with a closure
// that extracts the corresponding runtime.MemStats field at collection time.
func NewGoCollector() Collector {
	return &goCollector{
		goroutines: NewGauge(GaugeOpts{
			Namespace: "go",
			Name:      "goroutines",
			Help:      "Number of goroutines that currently exist.",
		}),
		gcDesc: NewDesc(
			"go_gc_duration_seconds",
			"A summary of the GC invocation durations.",
			nil, nil),
		metrics: memStatsMetrics{
			{
				desc: NewDesc(
					memstatNamespace("alloc_bytes"),
					"Number of bytes allocated and still in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("alloc_bytes_total"),
					"Total number of bytes allocated, even if freed.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("sys_bytes"),
					"Number of bytes obtained by system. Sum of all system allocations.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("lookups_total"),
					"Total number of pointer lookups.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mallocs_total"),
					"Total number of mallocs.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("frees_total"),
					"Total number of frees.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_alloc_bytes"),
					"Number of heap bytes allocated and still in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_sys_bytes"),
					"Number of heap bytes obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_idle_bytes"),
					"Number of heap bytes waiting to be used.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_inuse_bytes"),
					"Number of heap bytes that are in use.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_released_bytes_total"),
					"Total number of heap bytes released to OS.",
					nil, nil,
				),
				// NOTE(review): HeapReleased is not guaranteed monotonic;
				// newer upstream versions expose it as a gauge named
				// go_memstats_heap_released_bytes. Kept as-is here to
				// preserve the exported metric name and type.
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
				valType: CounterValue,
			}, {
				desc: NewDesc(
					memstatNamespace("heap_objects"),
					"Number of allocated objects.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("stack_inuse_bytes"),
					"Number of bytes in use by the stack allocator.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("stack_sys_bytes"),
					"Number of bytes obtained from system for stack allocator.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mspan_inuse_bytes"),
					"Number of bytes in use by mspan structures.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mspan_sys_bytes"),
					"Number of bytes used for mspan structures obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mcache_inuse_bytes"),
					"Number of bytes in use by mcache structures.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("mcache_sys_bytes"),
					"Number of bytes used for mcache structures obtained from system.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("buck_hash_sys_bytes"),
					"Number of bytes used by the profiling bucket hash table.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("gc_sys_bytes"),
					"Number of bytes used for garbage collection system metadata.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("other_sys_bytes"),
					"Number of bytes used for other system allocations.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("next_gc_bytes"),
					"Number of heap bytes when next garbage collection will take place.",
					nil, nil,
				),
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
				valType: GaugeValue,
			}, {
				desc: NewDesc(
					memstatNamespace("last_gc_time_seconds"),
					"Number of seconds since 1970 of last garbage collection.",
					nil, nil,
				),
				// LastGC is in nanoseconds since the epoch; convert to seconds.
				eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
				valType: GaugeValue,
			},
		},
	}
}
|||
|
|||
// memstatNamespace prefixes a memstats metric name with the common
// "go_memstats_" namespace.
func memstatNamespace(s string) string {
	// Plain concatenation is equivalent to the previous
	// fmt.Sprintf("go_memstats_%s", s) but avoids the formatting
	// machinery and its per-call boxing.
	return "go_memstats_" + s
}
|||
|
|||
// Describe returns all descriptions of the collector.
|
|||
func (c *goCollector) Describe(ch chan<- *Desc) { |
|||
ch <- c.goroutines.Desc() |
|||
ch <- c.gcDesc |
|||
|
|||
for _, i := range c.metrics { |
|||
ch <- i.desc |
|||
} |
|||
} |
|||
|
|||
// Collect returns the current state of all metrics of the collector.
|
|||
func (c *goCollector) Collect(ch chan<- Metric) { |
|||
c.goroutines.Set(float64(runtime.NumGoroutine())) |
|||
ch <- c.goroutines |
|||
|
|||
var stats debug.GCStats |
|||
stats.PauseQuantiles = make([]time.Duration, 5) |
|||
debug.ReadGCStats(&stats) |
|||
|
|||
quantiles := make(map[float64]float64) |
|||
for idx, pq := range stats.PauseQuantiles[1:] { |
|||
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() |
|||
} |
|||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds() |
|||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) |
|||
|
|||
ms := &runtime.MemStats{} |
|||
runtime.ReadMemStats(ms) |
|||
for _, i := range c.metrics { |
|||
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) |
|||
} |
|||
} |
|||
|
|||
// memStatsMetrics provide description, value, and value type for memstat
// metrics. Each element binds a metric description to a closure that reads
// the corresponding field out of a runtime.MemStats snapshot.
type memStatsMetrics []struct {
	desc    *Desc
	eval    func(*runtime.MemStats) float64
	valType ValueType
}
|||
@ -0,0 +1,123 @@ |
|||
package prometheus |
|||
|
|||
import ( |
|||
"runtime" |
|||
"testing" |
|||
"time" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// TestGoCollector checks that the goroutine-count gauge grows by exactly
// one between two Collect calls when a single extra goroutine is started
// in between.
func TestGoCollector(t *testing.T) {
	var (
		c      = NewGoCollector()
		ch     = make(chan Metric)
		waitc  = make(chan struct{}) // signals that the first gauge was read
		closec = make(chan struct{}) // keeps the extra goroutine alive until test end
		old    = -1
	)
	defer close(closec)

	go func() {
		c.Collect(ch)
		// Start exactly one extra goroutine that blocks until the test
		// ends, so the second Collect should observe old+1 goroutines.
		go func(c <-chan struct{}) {
			<-c
		}(closec)
		<-waitc
		c.Collect(ch)
	}()

	for {
		select {
		case metric := <-ch:
			switch m := metric.(type) {
			// Attention, this also catches Counter...
			case Gauge:
				pb := &dto.Metric{}
				m.Write(pb)
				if pb.GetGauge() == nil {
					continue
				}

				if old == -1 {
					// First Collect: remember the baseline and let the
					// sender proceed to the second Collect.
					old = int(pb.GetGauge().GetValue())
					close(waitc)
					continue
				}

				if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
					// TODO: This is flaky in highly concurrent situations.
					t.Errorf("want 1 new goroutine, got %d", diff)
				}

				// GoCollector performs two sends per call. We need to
				// receive the second send of the second Collect call
				// above so the sender goroutine can shut down cleanly.
				<-ch
				return
			}
		case <-time.After(1 * time.Second):
			t.Fatalf("expected collect timed out")
		}
	}
}
|||
|
|||
func TestGCCollector(t *testing.T) { |
|||
var ( |
|||
c = NewGoCollector() |
|||
ch = make(chan Metric) |
|||
waitc = make(chan struct{}) |
|||
closec = make(chan struct{}) |
|||
oldGC uint64 |
|||
oldPause float64 |
|||
) |
|||
defer close(closec) |
|||
|
|||
go func() { |
|||
c.Collect(ch) |
|||
// force GC
|
|||
runtime.GC() |
|||
<-waitc |
|||
c.Collect(ch) |
|||
}() |
|||
|
|||
first := true |
|||
for { |
|||
select { |
|||
case metric := <-ch: |
|||
switch m := metric.(type) { |
|||
case *constSummary, *value: |
|||
pb := &dto.Metric{} |
|||
m.Write(pb) |
|||
if pb.GetSummary() == nil { |
|||
continue |
|||
} |
|||
|
|||
if len(pb.GetSummary().Quantile) != 5 { |
|||
t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) |
|||
} |
|||
for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { |
|||
if *pb.GetSummary().Quantile[idx].Quantile != want { |
|||
t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) |
|||
} |
|||
} |
|||
if first { |
|||
first = false |
|||
oldGC = *pb.GetSummary().SampleCount |
|||
oldPause = *pb.GetSummary().SampleSum |
|||
close(waitc) |
|||
continue |
|||
} |
|||
if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { |
|||
t.Errorf("want 1 new garbage collection run, got %d", diff) |
|||
} |
|||
if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { |
|||
t.Errorf("want moar pause, got %f", diff) |
|||
} |
|||
return |
|||
} |
|||
case <-time.After(1 * time.Second): |
|||
t.Fatalf("expected collect timed out") |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,444 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"math" |
|||
"sort" |
|||
"sync/atomic" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// A Histogram counts individual observations from an event or sample stream in
|
|||
// configurable buckets. Similar to a summary, it also provides a sum of
|
|||
// observations and an observation count.
|
|||
//
|
|||
// On the Prometheus server, quantiles can be calculated from a Histogram using
|
|||
// the histogram_quantile function in the query language.
|
|||
//
|
|||
// Note that Histograms, in contrast to Summaries, can be aggregated with the
|
|||
// Prometheus query language (see the documentation for detailed
|
|||
// procedures). However, Histograms require the user to pre-define suitable
|
|||
// buckets, and they are in general less accurate. The Observe method of a
|
|||
// Histogram has a very low performance overhead in comparison with the Observe
|
|||
// method of a Summary.
|
|||
//
|
|||
// To create Histogram instances, use NewHistogram.
|
|||
type Histogram interface {
	Metric
	Collector

	// Observe adds a single observation to the histogram.
	Observe(float64)
}

// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal"). It is reserved and
// rejected as a user-provided label name, see newHistogram.
const bucketLabel = "le"

// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
	DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}

	// errBucketLabelNotAllowed is the panic value used when a histogram is
	// created with the reserved "le" label.
	errBucketLabelNotAllowed = fmt.Errorf(
		"%q is not allowed as label name in histograms", bucketLabel,
	)
)
|||
|
|||
// LinearBuckets returns a slice of 'count' bucket upper bounds for use in
// the Buckets field of HistogramOpts: the lowest bound is 'start' and each
// following bound is 'width' larger than the previous one. The final +Inf
// bucket is not counted and not included in the returned slice.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
	if count < 1 {
		panic("LinearBuckets needs a positive count")
	}
	// Accumulate by repeated addition (matching the documented contract)
	// rather than computing start + i*width.
	bounds := make([]float64, 0, count)
	next := start
	for len(bounds) < count {
		bounds = append(bounds, next)
		next += width
	}
	return bounds
}
|||
|
|||
// ExponentialBuckets returns a slice of 'count' bucket upper bounds for use
// in the Buckets field of HistogramOpts: the lowest bound is 'start' and
// each following bound is 'factor' times the previous one. The final +Inf
// bucket is not counted and not included in the returned slice.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or
// negative, or if 'factor' is less than or equal 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
	// Validate arguments in the same order as before so the same panic
	// message fires for any given bad input.
	switch {
	case count < 1:
		panic("ExponentialBuckets needs a positive count")
	case start <= 0:
		panic("ExponentialBuckets needs a positive start value")
	case factor <= 1:
		panic("ExponentialBuckets needs a factor greater than 1")
	}
	bounds := make([]float64, 0, count)
	next := start
	for len(bounds) < count {
		bounds = append(bounds, next)
		next *= factor
	}
	return bounds
}
|||
|
|||
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Histogram must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Histogram. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this
	// Histogram. Histograms with the same fully-qualified name must have the
	// same label names in their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a
	// HistogramVec. ConstLabels serve only special purposes. One is for the
	// special case where the value of a label does not change during the
	// lifetime of a process, e.g. if the revision of the running binary is
	// put into a label. Another, more advanced purpose is if more than one
	// Collector needs to collect Histograms with the same fully-qualified
	// name. In that case, those Histograms must differ in the values of
	// their ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels

	// Buckets defines the buckets into which observations are counted. Each
	// element in the slice is the upper inclusive bound of a bucket. The
	// values must be sorted in strictly increasing order. There is no need
	// to add a highest bucket with +Inf bound, it will be added
	// implicitly. The default value is DefBuckets.
	Buckets []float64
}
|||
|
|||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
|||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
|||
func NewHistogram(opts HistogramOpts) Histogram { |
|||
return newHistogram( |
|||
NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
nil, |
|||
opts.ConstLabels, |
|||
), |
|||
opts, |
|||
) |
|||
} |
|||
|
|||
// newHistogram is the internal constructor shared by NewHistogram and
// HistogramVec. It panics on inconsistent label cardinality, on use of the
// reserved "le" label, or on buckets that are not in strictly increasing
// order.
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	// The "le" label name is reserved for the bucket upper bounds.
	for _, n := range desc.variableLabels {
		if n == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == bucketLabel {
			panic(errBucketLabelNotAllowed)
		}
	}

	if len(opts.Buckets) == 0 {
		opts.Buckets = DefBuckets
	}

	h := &histogram{
		desc:        desc,
		upperBounds: opts.Buckets,
		labelPairs:  makeLabelPairs(desc, labelValues),
	}
	// Validate ordering of all but the last bound; for the last bound,
	// strip it if it is +Inf (that bucket is always implicit).
	for i, upperBound := range h.upperBounds {
		if i < len(h.upperBounds)-1 {
			if upperBound >= h.upperBounds[i+1] {
				panic(fmt.Errorf(
					"histogram buckets must be in increasing order: %f >= %f",
					upperBound, h.upperBounds[i+1],
				))
			}
		} else {
			if math.IsInf(upperBound, +1) {
				// The +Inf bucket is implicit. Remove it here.
				h.upperBounds = h.upperBounds[:i]
			}
		}
	}
	// Finally we know the final length of h.upperBounds and can make counts.
	h.counts = make([]uint64, len(h.upperBounds))

	h.init(h) // Init self-collection.
	return h
}
|||
|
|||
// histogram is the standard implementation of the Histogram interface.
// All mutation happens through atomic operations, see Observe.
type histogram struct {
	// sumBits contains the bits of the float64 representing the sum of all
	// observations. sumBits and count have to go first in the struct to
	// guarantee alignment for atomic operations.
	// http://golang.org/pkg/pkg/sync/atomic/#pkg-note-BUG
	sumBits uint64
	count   uint64

	selfCollector
	// Note that there is no mutex required.

	desc *Desc

	// upperBounds holds the (strictly increasing, +Inf-free) bucket
	// bounds; counts holds one observation counter per bound.
	upperBounds []float64
	counts      []uint64

	labelPairs []*dto.LabelPair
}
|||
|
|||
// Desc returns the descriptor of the histogram.
func (h *histogram) Desc() *Desc {
	return h.desc
}
|||
|
|||
// Observe adds a single observation to the histogram. It is safe for
// concurrent use: bucket and total counters are incremented atomically,
// and the float64 sum is updated via a CAS loop on its bit pattern.
func (h *histogram) Observe(v float64) {
	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
	// slightly faster than the binary search. If we really care, we could
	// switch from one search strategy to the other depending on the number
	// of buckets.
	//
	// Microbenchmarks (BenchmarkHistogramNoLabels):
	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	i := sort.SearchFloat64s(h.upperBounds, v)
	// i == len(h.counts) means v falls into the implicit +Inf bucket,
	// which has no explicit counter; only the total count is bumped.
	if i < len(h.counts) {
		atomic.AddUint64(&h.counts[i], 1)
	}
	atomic.AddUint64(&h.count, 1)
	for {
		oldBits := atomic.LoadUint64(&h.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
			break
		}
	}
}
|||
|
|||
// Write serializes the current histogram state into out, with cumulative
// per-bucket counts as required by the exposition format.
//
// NOTE(review): sum, count, and the bucket counters are read with separate
// atomic loads, so the snapshot may be slightly inconsistent while Observe
// calls are in flight.
func (h *histogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{}
	buckets := make([]*dto.Bucket, len(h.upperBounds))

	his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
	his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
	// Accumulate a running total so each bucket carries the cumulative count.
	var count uint64
	for i, upperBound := range h.upperBounds {
		count += atomic.LoadUint64(&h.counts[i])
		buckets[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		}
	}
	his.Bucket = buckets
	out.Histogram = his
	out.Label = h.labelPairs
	return nil
}
|||
|
|||
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	*MetricVec
}
|||
|
|||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
|||
// partitioned by the given label names. At least one label name must be
|
|||
// provided.
|
|||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { |
|||
desc := NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
labelNames, |
|||
opts.ConstLabels, |
|||
) |
|||
return &HistogramVec{ |
|||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { |
|||
return newHistogram(desc, opts, lvs...) |
|||
}), |
|||
} |
|||
} |
|||
|
|||
// GetMetricWithLabelValues replaces the method of the same name in
|
|||
// MetricVec. The difference is that this method returns a Histogram and not a
|
|||
// Metric so that no type conversion is required.
|
|||
func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { |
|||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) |
|||
if metric != nil { |
|||
return metric.(Histogram), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|||
// difference is that this method returns a Histogram and not a Metric so that no
|
|||
// type conversion is required.
|
|||
func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { |
|||
metric, err := m.MetricVec.GetMetricWith(labels) |
|||
if metric != nil { |
|||
return metric.(Histogram), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
	return m.MetricVec.WithLabelValues(lvs...).(Histogram)
}
|||
|
|||
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *HistogramVec) With(labels Labels) Histogram {
	return m.MetricVec.With(labels).(Histogram)
}
|||
|
|||
// constHistogram is an immutable histogram snapshot created by
// NewConstHistogram. It implements only Metric (not Histogram), since its
// values cannot change after construction.
type constHistogram struct {
	desc  *Desc
	count uint64
	sum   float64
	// buckets maps a bucket's upper bound to its cumulative count,
	// excluding the implicit +Inf bucket.
	buckets    map[float64]uint64
	labelPairs []*dto.LabelPair
}
|||
|
|||
// Desc returns the descriptor of the constant histogram.
func (h *constHistogram) Desc() *Desc {
	return h.desc
}
|||
|
|||
// Write serializes the fixed histogram state into out, emitting buckets in
// ascending order of upper bound.
func (h *constHistogram) Write(out *dto.Metric) error {
	his := &dto.Histogram{}
	buckets := make([]*dto.Bucket, 0, len(h.buckets))

	his.SampleCount = proto.Uint64(h.count)
	his.SampleSum = proto.Float64(h.sum)

	// Map iteration order is random, so collect first and sort below.
	for upperBound, count := range h.buckets {
		buckets = append(buckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(count),
			UpperBound:      proto.Float64(upperBound),
		})
	}

	if len(buckets) > 0 {
		sort.Sort(buckSort(buckets))
	}
	his.Bucket = buckets

	out.Histogram = his
	out.Label = h.labelPairs

	return nil
}
|||
|
|||
// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
//
// NOTE(review): the buckets map is retained by the returned metric, not
// copied — callers should not mutate it afterwards.
func NewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constHistogram{
		desc:       desc,
		count:      count,
		sum:        sum,
		buckets:    buckets,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}
|||
|
|||
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
	desc *Desc,
	count uint64,
	sum float64,
	buckets map[float64]uint64,
	labelValues ...string,
) Metric {
	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
|||
|
|||
// buckSort implements sort.Interface for []*dto.Bucket, ordering buckets by
// ascending upper bound. Used by constHistogram.Write.
type buckSort []*dto.Bucket

func (s buckSort) Len() int {
	return len(s)
}

func (s buckSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s buckSort) Less(i, j int) bool {
	return s[i].GetUpperBound() < s[j].GetUpperBound()
}
|||
@ -0,0 +1,326 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"math" |
|||
"math/rand" |
|||
"reflect" |
|||
"sort" |
|||
"sync" |
|||
"testing" |
|||
"testing/quick" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// benchmarkHistogramObserve measures Observe throughput with w goroutines
// writing concurrently to a single histogram.
func benchmarkHistogramObserve(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	// g gates all workers so they start observing simultaneously once the
	// timer is running.
	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewHistogram(HistogramOpts{})

	for i := 0; i < w; i++ {
		go func() {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Observe(float64(i))
			}

			wg.Done()
		}()
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}
|||
|
|||
// BenchmarkHistogramObserve1/2/4/8 run the Observe benchmark at increasing
// levels of goroutine concurrency.
func BenchmarkHistogramObserve1(b *testing.B) {
	benchmarkHistogramObserve(1, b)
}

func BenchmarkHistogramObserve2(b *testing.B) {
	benchmarkHistogramObserve(2, b)
}

func BenchmarkHistogramObserve4(b *testing.B) {
	benchmarkHistogramObserve(4, b)
}

func BenchmarkHistogramObserve8(b *testing.B) {
	benchmarkHistogramObserve(8, b)
}
|||
|
|||
// benchmarkHistogramWrite measures Write throughput with w goroutines
// serializing a pre-populated histogram concurrently.
func benchmarkHistogramWrite(w int, b *testing.B) {
	b.StopTimer()

	wg := new(sync.WaitGroup)
	wg.Add(w)

	// g gates all workers so they start writing simultaneously once the
	// timer is running.
	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewHistogram(HistogramOpts{})

	// Pre-populate so Write has non-trivial state to serialize.
	for i := 0; i < 1000000; i++ {
		s.Observe(float64(i))
	}

	for j := 0; j < w; j++ {
		outs := make([]dto.Metric, b.N)

		go func(o []dto.Metric) {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Write(&o[i])
			}

			wg.Done()
		}(outs)
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}
|||
|
|||
// BenchmarkHistogramWrite1/2/4/8 run the Write benchmark at increasing
// levels of goroutine concurrency.
func BenchmarkHistogramWrite1(b *testing.B) {
	benchmarkHistogramWrite(1, b)
}

func BenchmarkHistogramWrite2(b *testing.B) {
	benchmarkHistogramWrite(2, b)
}

func BenchmarkHistogramWrite4(b *testing.B) {
	benchmarkHistogramWrite(4, b)
}

func BenchmarkHistogramWrite8(b *testing.B) {
	benchmarkHistogramWrite(8, b)
}
|||
|
|||
// Intentionally adding +Inf here to test if that case is handled correctly.
// Also, getCumulativeCounts depends on it.
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
|||
|
|||
// TestHistogramConcurrency property-tests a single histogram under
// concurrent Observe calls: after all writers finish, the serialized count,
// sum, bucket bounds, and cumulative bucket counts must match the values
// computed sequentially from the same samples.
func TestHistogramConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	it := func(n uint32) bool {
		// Derive workload size and concurrency level from the quick.Check
		// input: 1e4..2e4 observations per goroutine, 1..5 goroutines.
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%5 + 1)
		total := mutations * concLevel

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewHistogram(HistogramOpts{
			Name:    "test_histogram",
			Help:    "helpless",
			Buckets: testBuckets,
		})

		allVars := make([]float64, total)
		var sampleSum float64
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				allVars[i*mutations+j] = v
				sampleSum += v
			}

			go func(vals []float64) {
				start.Wait()
				for _, v := range vals {
					sum.Observe(v)
				}
				end.Done()
			}(vals)
		}
		// Sorting allVars lets getCumulativeCounts derive expected
		// per-bucket counts below.
		sort.Float64s(allVars)
		start.Done()
		end.Wait()

		m := &dto.Metric{}
		sum.Write(m)
		if got, want := int(*m.Histogram.SampleCount), total; got != want {
			t.Errorf("got sample count %d, want %d", got, want)
		}
		// Relative tolerance, since concurrent float addition is not
		// exactly associative.
		if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
			t.Errorf("got sample sum %f, want %f", got, want)
		}

		wantCounts := getCumulativeCounts(allVars)

		if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
			t.Errorf("got %d buckets in protobuf, want %d", got, want)
		}
		for i, wantBound := range testBuckets {
			if i == len(testBuckets)-1 {
				break // No +Inf bucket in protobuf.
			}
			if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
				t.Errorf("got bound %f, want %f", gotBound, wantBound)
			}
			if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
				t.Errorf("got count %d, want %d", gotCount, wantCount)
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}
|||
|
|||
func TestHistogramVecConcurrency(t *testing.T) { |
|||
if testing.Short() { |
|||
t.Skip("Skipping test in short mode.") |
|||
} |
|||
|
|||
rand.Seed(42) |
|||
|
|||
objectives := make([]float64, 0, len(DefObjectives)) |
|||
for qu := range DefObjectives { |
|||
|
|||
objectives = append(objectives, qu) |
|||
} |
|||
sort.Float64s(objectives) |
|||
|
|||
it := func(n uint32) bool { |
|||
mutations := int(n%1e4 + 1e4) |
|||
concLevel := int(n%7 + 1) |
|||
vecLength := int(n%3 + 1) |
|||
|
|||
var start, end sync.WaitGroup |
|||
start.Add(1) |
|||
end.Add(concLevel) |
|||
|
|||
his := NewHistogramVec( |
|||
HistogramOpts{ |
|||
Name: "test_histogram", |
|||
Help: "helpless", |
|||
Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, |
|||
}, |
|||
[]string{"label"}, |
|||
) |
|||
|
|||
allVars := make([][]float64, vecLength) |
|||
sampleSums := make([]float64, vecLength) |
|||
for i := 0; i < concLevel; i++ { |
|||
vals := make([]float64, mutations) |
|||
picks := make([]int, mutations) |
|||
for j := 0; j < mutations; j++ { |
|||
v := rand.NormFloat64() |
|||
vals[j] = v |
|||
pick := rand.Intn(vecLength) |
|||
picks[j] = pick |
|||
allVars[pick] = append(allVars[pick], v) |
|||
sampleSums[pick] += v |
|||
} |
|||
|
|||
go func(vals []float64) { |
|||
start.Wait() |
|||
for i, v := range vals { |
|||
his.WithLabelValues(string('A' + picks[i])).Observe(v) |
|||
} |
|||
end.Done() |
|||
}(vals) |
|||
} |
|||
for _, vars := range allVars { |
|||
sort.Float64s(vars) |
|||
} |
|||
start.Done() |
|||
end.Wait() |
|||
|
|||
for i := 0; i < vecLength; i++ { |
|||
m := &dto.Metric{} |
|||
s := his.WithLabelValues(string('A' + i)) |
|||
s.Write(m) |
|||
|
|||
if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { |
|||
t.Errorf("got %d buckets in protobuf, want %d", got, want) |
|||
} |
|||
if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { |
|||
t.Errorf("got sample count %d, want %d", got, want) |
|||
} |
|||
if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { |
|||
t.Errorf("got sample sum %f, want %f", got, want) |
|||
} |
|||
|
|||
wantCounts := getCumulativeCounts(allVars[i]) |
|||
|
|||
for j, wantBound := range testBuckets { |
|||
if j == len(testBuckets)-1 { |
|||
break // No +Inf bucket in protobuf.
|
|||
} |
|||
if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { |
|||
t.Errorf("got bound %f, want %f", gotBound, wantBound) |
|||
} |
|||
if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { |
|||
t.Errorf("got count %d, want %d", gotCount, wantCount) |
|||
} |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
if err := quick.Check(it, nil); err != nil { |
|||
t.Error(err) |
|||
} |
|||
} |
|||
|
|||
func getCumulativeCounts(vars []float64) []uint64 { |
|||
counts := make([]uint64, len(testBuckets)) |
|||
for _, v := range vars { |
|||
for i := len(testBuckets) - 1; i >= 0; i-- { |
|||
if v > testBuckets[i] { |
|||
break |
|||
} |
|||
counts[i]++ |
|||
} |
|||
} |
|||
return counts |
|||
} |
|||
|
|||
func TestBuckets(t *testing.T) { |
|||
got := LinearBuckets(-15, 5, 6) |
|||
want := []float64{-15, -10, -5, 0, 5, 10} |
|||
if !reflect.DeepEqual(got, want) { |
|||
t.Errorf("linear buckets: got %v, want %v", got, want) |
|||
} |
|||
|
|||
got = ExponentialBuckets(100, 1.2, 3) |
|||
want = []float64{100, 120, 144} |
|||
if !reflect.DeepEqual(got, want) { |
|||
t.Errorf("linear buckets: got %v, want %v", got, want) |
|||
} |
|||
} |
|||
@ -0,0 +1,499 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"bufio" |
|||
"bytes" |
|||
"compress/gzip" |
|||
"fmt" |
|||
"io" |
|||
"net" |
|||
"net/http" |
|||
"strconv" |
|||
"strings" |
|||
"sync" |
|||
"time" |
|||
|
|||
"github.com/prometheus/common/expfmt" |
|||
) |
|||
|
|||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
|||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
|||
// related should live. The functions here are just for avoiding
|
|||
// breakage. Everything is deprecated.
|
|||
|
|||
// Standard HTTP header names used when negotiating and writing the metrics
// response.
const (
	contentTypeHeader = "Content-Type"
	contentLengthHeader = "Content-Length"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader = "Accept-Encoding"
)
|||
|
|||
// bufPool recycles the byte buffers used to render metrics responses,
// avoiding one buffer allocation per scrape.
var bufPool sync.Pool

// getBuf returns a buffer from the pool, or a fresh one if the pool is
// empty (bufPool has no New function, so Get may return nil).
func getBuf() *bytes.Buffer {
	buf := bufPool.Get()
	if buf == nil {
		return &bytes.Buffer{}
	}
	return buf.(*bytes.Buffer)
}

// giveBuf resets buf and returns it to the pool for reuse.
func giveBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
|||
|
|||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
|||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
|||
// name).
|
|||
//
|
|||
// Deprecated: Please note the issues described in the doc comment of
|
|||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
|||
// (which is non instrumented).
|
|||
func Handler() http.Handler {
	// Thin composition: the uninstrumented handler wrapped in the
	// (deprecated) InstrumentHandler under the fixed name "prometheus".
	return InstrumentHandler("prometheus", UninstrumentedHandler())
}
|||
|
|||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
|||
//
|
|||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
|||
func UninstrumentedHandler() http.Handler { |
|||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { |
|||
mfs, err := DefaultGatherer.Gather() |
|||
if err != nil { |
|||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
|
|||
contentType := expfmt.Negotiate(req.Header) |
|||
buf := getBuf() |
|||
defer giveBuf(buf) |
|||
writer, encoding := decorateWriter(req, buf) |
|||
enc := expfmt.NewEncoder(writer, contentType) |
|||
var lastErr error |
|||
for _, mf := range mfs { |
|||
if err := enc.Encode(mf); err != nil { |
|||
lastErr = err |
|||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
} |
|||
if closer, ok := writer.(io.Closer); ok { |
|||
closer.Close() |
|||
} |
|||
if lastErr != nil && buf.Len() == 0 { |
|||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
header := w.Header() |
|||
header.Set(contentTypeHeader, string(contentType)) |
|||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) |
|||
if encoding != "" { |
|||
header.Set(contentEncodingHeader, encoding) |
|||
} |
|||
w.Write(buf.Bytes()) |
|||
}) |
|||
} |
|||
|
|||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
|||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
|||
// (which is empty if no compression is enabled).
|
|||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { |
|||
header := request.Header.Get(acceptEncodingHeader) |
|||
parts := strings.Split(header, ",") |
|||
for _, part := range parts { |
|||
part := strings.TrimSpace(part) |
|||
if part == "gzip" || strings.HasPrefix(part, "gzip;") { |
|||
return gzip.NewWriter(writer), "gzip" |
|||
} |
|||
} |
|||
return writer, "" |
|||
} |
|||
|
|||
// instLabels are the variable label names of the requests_total CounterVec
// created by InstrumentHandlerFuncWithOpts.
var instLabels = []string{"method", "code"}

// nower abstracts the clock so tests can substitute deterministic times.
type nower interface {
	Now() time.Time
}

// nowFunc adapts a plain function to the nower interface.
type nowFunc func() time.Time

// Now invokes the wrapped function.
func (n nowFunc) Now() time.Time {
	return n()
}

// now is the package clock; tests replace it (see nowSeries and
// TestInstrumentHandler).
var now nower = nowFunc(func() time.Time {
	return time.Now()
})

// nowSeries returns a nower that replays the given times in order. Each
// call consumes one element; calling it more often than len(t) times will
// panic on the empty slice (acceptable for this test-only helper).
func nowSeries(t ...time.Time) nower {
	return nowFunc(func() time.Time {
		// Pop the head after returning it.
		defer func() {
			t = t[1:]
		}()

		return t[0]
	})
}
|||
|
|||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
|||
// registers four metric collectors (if not already done) and reports HTTP
|
|||
// metrics to the (newly or already) registered collectors: http_requests_total
|
|||
// (CounterVec), http_request_duration_microseconds (Summary),
|
|||
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
|
|||
// has a constant label named "handler" with the provided handlerName as
|
|||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
|||
// (label name "method") and HTTP status code (label name "code").
|
|||
//
|
|||
// Deprecated: InstrumentHandler has several issues:
|
|||
//
|
|||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
|||
// aggregation across multiple instances is required.
|
|||
//
|
|||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
|||
// seconds.
|
|||
//
|
|||
// - The size of the request is calculated in a separate goroutine. Since this
|
|||
// calculator requires access to the request header, it creates a race with
|
|||
// any writes to the header performed during request handling.
|
|||
// httputil.ReverseProxy is a prominent example for a handler
|
|||
// performing such writes.
|
|||
//
|
|||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
|||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
|||
// instrumentation in the meantime.
|
|||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
	// Thin wrapper: all work happens in InstrumentHandlerFunc.
	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
|||
|
|||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
|||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
|||
// issues).
|
|||
//
|
|||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
|||
// InstrumentHandler is.
|
|||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	// Fixed SummaryOpts matching the historical InstrumentHandler
	// behavior: metrics live in the "http" subsystem and carry the handler
	// name as a constant label.
	return InstrumentHandlerFuncWithOpts(
		SummaryOpts{
			Subsystem: "http",
			ConstLabels: Labels{"handler": handlerName},
		},
		handlerFunc,
	)
}
|||
|
|||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
|||
// issues) but provides more flexibility (at the cost of a more complex call
|
|||
// syntax). As InstrumentHandler, this function registers four metric
|
|||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
|||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
|||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
|||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
|||
// help string. The names of the variable labels of the http_requests_total
|
|||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
|||
//
|
|||
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
|
|||
// behavior of InstrumentHandler:
|
|||
//
|
|||
// prometheus.InstrumentHandlerWithOpts(
|
|||
// prometheus.SummaryOpts{
|
|||
// Subsystem: "http",
|
|||
// ConstLabels: prometheus.Labels{"handler": handlerName},
|
|||
// },
|
|||
// handler,
|
|||
// )
|
|||
//
|
|||
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
|
|||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
|||
// and all its fields are set to the equally named fields in the provided
|
|||
// SummaryOpts.
|
|||
//
|
|||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
|||
// InstrumentHandler is.
|
|||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
	// Thin wrapper: all work happens in InstrumentHandlerFuncWithOpts.
	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
|||
|
|||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
|||
// the same issues) but provides more flexibility (at the cost of a more complex
|
|||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
|||
// SummaryOpts are used.
|
|||
//
|
|||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
|||
// as InstrumentHandler is.
|
|||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	// requests_total is a CounterVec (not a Summary), so build CounterOpts
	// from the equally named SummaryOpts fields.
	reqCnt := NewCounterVec(
		CounterOpts{
			Namespace: opts.Namespace,
			Subsystem: opts.Subsystem,
			Name: "requests_total",
			Help: "Total number of HTTP requests made.",
			ConstLabels: opts.ConstLabels,
		},
		instLabels,
	)

	// The three Summaries reuse opts; only Name and Help differ.
	opts.Name = "request_duration_microseconds"
	opts.Help = "The HTTP request latencies in microseconds."
	reqDur := NewSummary(opts)

	opts.Name = "request_size_bytes"
	opts.Help = "The HTTP request sizes in bytes."
	reqSz := NewSummary(opts)

	opts.Name = "response_size_bytes"
	opts.Help = "The HTTP response sizes in bytes."
	resSz := NewSummary(opts)

	// If identical collectors are already registered (e.g. by a previous
	// InstrumentHandler call), reuse those instead of failing.
	regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
	regReqDur := MustRegisterOrGet(reqDur).(Summary)
	regReqSz := MustRegisterOrGet(reqSz).(Summary)
	regResSz := MustRegisterOrGet(resSz).(Summary)

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()

		delegate := &responseWriterDelegator{ResponseWriter: w}
		// The request size is computed in a separate goroutine; as noted
		// in the deprecation comment above, this races with handlers that
		// mutate the request headers.
		out := computeApproximateRequestSize(r)

		// Preserve the optional ResponseWriter interfaces only when the
		// underlying writer supports all four of them.
		_, cn := w.(http.CloseNotifier)
		_, fl := w.(http.Flusher)
		_, hj := w.(http.Hijacker)
		_, rf := w.(io.ReaderFrom)
		var rw http.ResponseWriter
		if cn && fl && hj && rf {
			rw = &fancyResponseWriterDelegator{delegate}
		} else {
			rw = delegate
		}
		handlerFunc(rw, r)

		elapsed := float64(time.Since(now)) / float64(time.Microsecond)

		method := sanitizeMethod(r.Method)
		code := sanitizeCode(delegate.status)
		regReqCnt.WithLabelValues(method, code).Inc()
		regReqDur.Observe(elapsed)
		regResSz.Observe(float64(delegate.written))
		// Blocks until the size-computation goroutine delivers its result.
		regReqSz.Observe(float64(<-out))
	})
}
|||
|
|||
func computeApproximateRequestSize(r *http.Request) <-chan int { |
|||
// Get URL length in current go routine for avoiding a race condition.
|
|||
// HandlerFunc that runs in parallel may modify the URL.
|
|||
s := 0 |
|||
if r.URL != nil { |
|||
s += len(r.URL.String()) |
|||
} |
|||
|
|||
out := make(chan int, 1) |
|||
|
|||
go func() { |
|||
s += len(r.Method) |
|||
s += len(r.Proto) |
|||
for name, values := range r.Header { |
|||
s += len(name) |
|||
for _, value := range values { |
|||
s += len(value) |
|||
} |
|||
} |
|||
s += len(r.Host) |
|||
|
|||
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
|
|||
|
|||
if r.ContentLength != -1 { |
|||
s += int(r.ContentLength) |
|||
} |
|||
out <- s |
|||
close(out) |
|||
}() |
|||
|
|||
return out |
|||
} |
|||
|
|||
// responseWriterDelegator wraps an http.ResponseWriter to record the status
// code and the number of body bytes written, so the instrumented handler can
// report them as metrics afterwards.
type responseWriterDelegator struct {
	http.ResponseWriter

	// NOTE(review): handler and method are never assigned within this
	// excerpt; they appear to be unused here.
	handler, method string
	status int
	written int64
	wroteHeader bool
}

// WriteHeader records the status code before forwarding the call.
func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
}

// Write forwards to the underlying writer, first recording the implicit 200
// status if no header was written yet, and accumulates the byte count.
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += int64(n)
	return n, err
}
|||
|
|||
// fancyResponseWriterDelegator additionally forwards the optional
// ResponseWriter interfaces (CloseNotifier, Flusher, Hijacker, ReaderFrom).
// It is only used when the wrapped writer implements all four; see
// InstrumentHandlerFuncWithOpts.
type fancyResponseWriterDelegator struct {
	*responseWriterDelegator
}

// CloseNotify forwards to the underlying writer's CloseNotifier.
func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

// Flush forwards to the underlying writer's Flusher.
func (f *fancyResponseWriterDelegator) Flush() {
	f.ResponseWriter.(http.Flusher).Flush()
}

// Hijack forwards to the underlying writer's Hijacker.
func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return f.ResponseWriter.(http.Hijacker).Hijack()
}

// ReadFrom forwards to the underlying writer's ReaderFrom, keeping the
// written-bytes count and the implicit 200 header consistent with Write.
func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
	if !f.wroteHeader {
		f.WriteHeader(http.StatusOK)
	}
	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
	f.written += n
	return n, err
}
|||
|
|||
// sanitizeMethod normalizes an HTTP method name to lowercase for use as the
// "method" label value.
//
// The previous implementation special-cased eight common methods in a
// switch, but every arm returned exactly the lowercase form of its input
// and the default arm was strings.ToLower — so the whole switch was
// equivalent to a single ToLower call. (strings.ToLower returns its input
// unchanged, without allocating, when it is already lowercase.)
func sanitizeMethod(m string) string {
	return strings.ToLower(m)
}
|||
|
|||
// sanitizeCode converts an HTTP status code to its decimal string form for
// use as the "code" label value.
//
// The previous implementation enumerated ~50 status codes in a giant
// switch, but every arm returned exactly the decimal representation of the
// code — which is precisely what the default arm (strconv.Itoa) produces.
// The switch was therefore pure duplication and has been removed; behavior
// is identical for every input.
func sanitizeCode(s int) string {
	return strconv.Itoa(s)
}
|||
@ -0,0 +1,121 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"net/http" |
|||
"net/http/httptest" |
|||
"testing" |
|||
"time" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
type respBody string |
|||
|
|||
func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { |
|||
w.WriteHeader(http.StatusTeapot) |
|||
w.Write([]byte(b)) |
|||
} |
|||
|
|||
// TestInstrumentHandler exercises the deprecated InstrumentHandler wrapper
// end-to-end: it serves one request through an instrumented teapot handler
// and checks the resulting duration summary and request counter.
func TestInstrumentHandler(t *testing.T) {
	// Restore the package clock when the test finishes.
	defer func(n nower) {
		now = n.(nower)
	}(now)

	// Replace the clock with a fixed two-step series so the measured
	// latency is deterministic.
	instant := time.Now()
	end := instant.Add(30 * time.Second)
	now = nowSeries(instant, end)
	respBody := respBody("Howdy there!")

	hndlr := InstrumentHandler("test-handler", respBody)

	opts := SummaryOpts{
		Subsystem: "http",
		ConstLabels: Labels{"handler": "test-handler"},
	}

	// Re-create the same collectors InstrumentHandler registers so we can
	// inspect them; MustRegisterOrGet hands back the already-registered
	// instances.
	reqCnt := MustRegisterOrGet(NewCounterVec(
		CounterOpts{
			Namespace: opts.Namespace,
			Subsystem: opts.Subsystem,
			Name: "requests_total",
			Help: "Total number of HTTP requests made.",
			ConstLabels: opts.ConstLabels,
		},
		instLabels,
	)).(*CounterVec)

	opts.Name = "request_duration_microseconds"
	opts.Help = "The HTTP request latencies in microseconds."
	reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary)

	opts.Name = "request_size_bytes"
	opts.Help = "The HTTP request sizes in bytes."
	MustRegisterOrGet(NewSummary(opts))

	opts.Name = "response_size_bytes"
	opts.Help = "The HTTP response sizes in bytes."
	MustRegisterOrGet(NewSummary(opts))

	// Clear any counts left over from other tests sharing the default
	// registry.
	reqCnt.Reset()

	resp := httptest.NewRecorder()
	req := &http.Request{
		Method: "GET",
	}

	hndlr.ServeHTTP(resp, req)

	if resp.Code != http.StatusTeapot {
		t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
	}
	if string(resp.Body.Bytes()) != "Howdy there!" {
		t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
	}

	// The duration summary must carry the handler const label and exactly
	// one observation.
	out := &dto.Metric{}
	reqDur.Write(out)
	if want, got := "test-handler", out.Label[0].GetValue(); want != got {
		t.Errorf("want label value %q in reqDur, got %q", want, got)
	}
	if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
		t.Errorf("want sample count %d in reqDur, got %d", want, got)
	}

	out.Reset()
	if want, got := 1, len(reqCnt.children); want != got {
		t.Errorf("want %d children in reqCnt, got %d", want, got)
	}
	cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
	if err != nil {
		t.Fatal(err)
	}
	cnt.Write(out)
	// Label order checked below is code, handler, method — apparently
	// lexicographic by label name.
	if want, got := "418", out.Label[0].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if want, got := "test-handler", out.Label[1].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if want, got := "get", out.Label[2].GetValue(); want != got {
		t.Errorf("want label value %q in reqCnt, got %q", want, got)
	}
	if out.Counter == nil {
		t.Fatal("expected non-nil counter in reqCnt")
	}
	if want, got := 1., out.Counter.GetValue(); want != got {
		t.Errorf("want reqCnt of %f, got %f", want, got)
	}
}
|||
@ -0,0 +1,166 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"strings" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// separatorByte is 0xFF, which is never valid in UTF-8 text, making it a
// safe delimiter between strings. NOTE(review): its use site is outside
// this excerpt — presumably separating label values when hashing metric
// identities; confirm against the Desc/registry code.
const separatorByte byte = 255
|||
|
|||
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
	// Desc returns the descriptor for the Metric. This method idempotently
	// returns the same descriptor throughout the lifetime of the
	// Metric. The returned descriptor is immutable by contract. A Metric
	// unable to describe itself must return an invalid descriptor (created
	// with NewInvalidDesc).
	Desc() *Desc
	// Write encodes the Metric into a "Metric" Protocol Buffer data
	// transmission object.
	//
	// Metric implementations must observe concurrency safety, as reads of
	// this metric may occur at any time, and any blocking occurs at the
	// expense of total performance of rendering all registered
	// metrics. Ideally, Metric implementations should support concurrent
	// readers.
	//
	// While populating dto.Metric, it is the responsibility of the
	// implementation to ensure validity of the Metric protobuf (like valid
	// UTF-8 strings or syntactically valid metric and label names). It is
	// recommended to sort labels lexicographically. (Implementers may find
	// LabelPairSorter useful for that.) Callers of Write should still make
	// sure of sorting if they depend on it.
	Write(*dto.Metric) error
	// TODO(beorn7): The original rationale of passing in a pre-allocated
	// dto.Metric protobuf to save allocations has disappeared. The
	// signature of this method should be changed to "Write() (*dto.Metric,
	// error)".
}
|||
|
|||
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is
// just an alias of this type (which might change when the requirement
// arises.)
//
// It is mandatory to set Name and Help to a non-empty string. All other fields
// are optional and can safely be left at their zero value.
type Opts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Metric (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the metric must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name string

	// Help provides information about this metric. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a metric
	// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
	// serve only special purposes. One is for the special case where the
	// value of a label does not change during the lifetime of a process,
	// e.g. if the revision of the running binary is put into a
	// label. Another, more advanced purpose is if more than one Collector
	// needs to collect Metrics with the same fully-qualified name. In that
	// case, those Metrics must differ in the values of their
	// ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels
}
|||
|
|||
// BuildFQName joins the given three name components by "_". Empty name
|
|||
// components are ignored. If the name parameter itself is empty, an empty
|
|||
// string is returned, no matter what. Metric implementations included in this
|
|||
// library use this function internally to generate the fully-qualified metric
|
|||
// name from the name component in their Opts. Users of the library will only
|
|||
// need this function if they implement their own Metric or instantiate a Desc
|
|||
// (with NewDesc) directly.
|
|||
// BuildFQName joins the given three name components by "_", skipping empty
// components. An empty name always yields an empty result, regardless of
// the other components.
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	// Collect the non-empty components in order, then join once.
	parts := make([]string, 0, 3)
	if namespace != "" {
		parts = append(parts, namespace)
	}
	if subsystem != "" {
		parts = append(parts, subsystem)
	}
	parts = append(parts, name)
	return strings.Join(parts, "_")
}
|||
|
|||
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of
// custom metrics.
type LabelPairSorter []*dto.LabelPair

// Len returns the number of label pairs.
func (s LabelPairSorter) Len() int {
	return len(s)
}

// Swap exchanges the pairs at indices i and j.
func (s LabelPairSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less orders pairs lexicographically by label name.
func (s LabelPairSorter) Less(i, j int) bool {
	return s[i].GetName() < s[j].GetName()
}
|||
|
|||
type hashSorter []uint64 |
|||
|
|||
func (s hashSorter) Len() int { |
|||
return len(s) |
|||
} |
|||
|
|||
func (s hashSorter) Swap(i, j int) { |
|||
s[i], s[j] = s[j], s[i] |
|||
} |
|||
|
|||
func (s hashSorter) Less(i, j int) bool { |
|||
return s[i] < s[j] |
|||
} |
|||
|
|||
// invalidMetric is a Metric that cannot be serialized: Write always returns
// the stored error. See NewInvalidMetric.
type invalidMetric struct {
	desc *Desc
	err error
}

// NewInvalidMetric returns a metric whose Write method always returns the
// provided error. It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
	return &invalidMetric{desc, err}
}

// Desc returns the descriptor supplied at construction time.
func (m *invalidMetric) Desc() *Desc { return m.desc }

// Write always fails with the stored error.
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|||
@ -0,0 +1,35 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import "testing" |
|||
|
|||
func TestBuildFQName(t *testing.T) { |
|||
scenarios := []struct{ namespace, subsystem, name, result string }{ |
|||
{"a", "b", "c", "a_b_c"}, |
|||
{"", "b", "c", "b_c"}, |
|||
{"a", "", "c", "a_c"}, |
|||
{"", "", "c", "c"}, |
|||
{"a", "b", "", ""}, |
|||
{"a", "", "", ""}, |
|||
{"", "b", "", ""}, |
|||
{" ", "", "", ""}, |
|||
} |
|||
|
|||
for i, s := range scenarios { |
|||
if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { |
|||
t.Errorf("%d. want %s, got %s", i, want, got) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,142 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import "github.com/prometheus/procfs" |
|||
|
|||
// processCollector collects process-level metrics (CPU time, memory, file
// descriptors, start time) for the process identified via pidFn.
type processCollector struct {
	// pid is set nowhere in the visible code and collection always goes
	// through pidFn. NOTE(review): appears unused — confirm before removal.
	pid int
	// collectFn is the active collection strategy: a no-op unless procfs
	// is available (see NewProcessCollectorPIDFn).
	collectFn func(chan<- Metric)
	// pidFn is called on each collect to determine the target process.
	pidFn           func() (int, error)
	cpuTotal        Counter
	openFDs, maxFDs Gauge
	vsize, rss      Gauge
	startTime       Gauge
}
|||
|
|||
// NewProcessCollector returns a collector which exports the current state of
|
|||
// process metrics including cpu, memory and file descriptor usage as well as
|
|||
// the process start time for the given process id under the given namespace.
|
|||
func NewProcessCollector(pid int, namespace string) Collector { |
|||
return NewProcessCollectorPIDFn( |
|||
func() (int, error) { return pid, nil }, |
|||
namespace, |
|||
) |
|||
} |
|||
|
|||
// NewProcessCollectorPIDFn returns a collector which exports the current state
|
|||
// of process metrics including cpu, memory and file descriptor usage as well
|
|||
// as the process start time under the given namespace. The given pidFn is
|
|||
// called on each collect and is used to determine the process to export
|
|||
// metrics for.
|
|||
func NewProcessCollectorPIDFn( |
|||
pidFn func() (int, error), |
|||
namespace string, |
|||
) Collector { |
|||
c := processCollector{ |
|||
pidFn: pidFn, |
|||
collectFn: func(chan<- Metric) {}, |
|||
|
|||
cpuTotal: NewCounter(CounterOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_cpu_seconds_total", |
|||
Help: "Total user and system CPU time spent in seconds.", |
|||
}), |
|||
openFDs: NewGauge(GaugeOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_open_fds", |
|||
Help: "Number of open file descriptors.", |
|||
}), |
|||
maxFDs: NewGauge(GaugeOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_max_fds", |
|||
Help: "Maximum number of open file descriptors.", |
|||
}), |
|||
vsize: NewGauge(GaugeOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_virtual_memory_bytes", |
|||
Help: "Virtual memory size in bytes.", |
|||
}), |
|||
rss: NewGauge(GaugeOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_resident_memory_bytes", |
|||
Help: "Resident memory size in bytes.", |
|||
}), |
|||
startTime: NewGauge(GaugeOpts{ |
|||
Namespace: namespace, |
|||
Name: "process_start_time_seconds", |
|||
Help: "Start time of the process since unix epoch in seconds.", |
|||
}), |
|||
} |
|||
|
|||
// Set up process metric collection if supported by the runtime.
|
|||
if _, err := procfs.NewStat(); err == nil { |
|||
c.collectFn = c.processCollect |
|||
} |
|||
|
|||
return &c |
|||
} |
|||
|
|||
// Describe returns all descriptions of the collector.
|
|||
func (c *processCollector) Describe(ch chan<- *Desc) { |
|||
ch <- c.cpuTotal.Desc() |
|||
ch <- c.openFDs.Desc() |
|||
ch <- c.maxFDs.Desc() |
|||
ch <- c.vsize.Desc() |
|||
ch <- c.rss.Desc() |
|||
ch <- c.startTime.Desc() |
|||
} |
|||
|
|||
// Collect returns the current state of all metrics of the collector.
// It delegates to collectFn, which is a no-op when procfs is not available
// (see NewProcessCollectorPIDFn).
func (c *processCollector) Collect(ch chan<- Metric) {
	c.collectFn(ch)
}
|||
|
|||
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
|
|||
// client allows users to configure the error behavior.
|
|||
func (c *processCollector) processCollect(ch chan<- Metric) { |
|||
pid, err := c.pidFn() |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
p, err := procfs.NewProc(pid) |
|||
if err != nil { |
|||
return |
|||
} |
|||
|
|||
if stat, err := p.NewStat(); err == nil { |
|||
c.cpuTotal.Set(stat.CPUTime()) |
|||
ch <- c.cpuTotal |
|||
c.vsize.Set(float64(stat.VirtualMemory())) |
|||
ch <- c.vsize |
|||
c.rss.Set(float64(stat.ResidentMemory())) |
|||
ch <- c.rss |
|||
|
|||
if startTime, err := stat.StartTime(); err == nil { |
|||
c.startTime.Set(startTime) |
|||
ch <- c.startTime |
|||
} |
|||
} |
|||
|
|||
if fds, err := p.FileDescriptorsLen(); err == nil { |
|||
c.openFDs.Set(float64(fds)) |
|||
ch <- c.openFDs |
|||
} |
|||
|
|||
if limits, err := p.NewLimits(); err == nil { |
|||
c.maxFDs.Set(float64(limits.OpenFiles)) |
|||
ch <- c.maxFDs |
|||
} |
|||
} |
|||
@ -0,0 +1,58 @@ |
|||
package prometheus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"os" |
|||
"regexp" |
|||
"testing" |
|||
|
|||
"github.com/prometheus/common/expfmt" |
|||
"github.com/prometheus/procfs" |
|||
) |
|||
|
|||
func TestProcessCollector(t *testing.T) { |
|||
if _, err := procfs.Self(); err != nil { |
|||
t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) |
|||
} |
|||
|
|||
registry := NewRegistry() |
|||
if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if err := registry.Register(NewProcessCollectorPIDFn( |
|||
func() (int, error) { return os.Getpid(), nil }, "foobar"), |
|||
); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
mfs, err := registry.Gather() |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
var buf bytes.Buffer |
|||
for _, mf := range mfs { |
|||
if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
} |
|||
|
|||
for _, re := range []*regexp.Regexp{ |
|||
regexp.MustCompile("process_cpu_seconds_total [0-9]"), |
|||
regexp.MustCompile("process_max_fds [1-9]"), |
|||
regexp.MustCompile("process_open_fds [1-9]"), |
|||
regexp.MustCompile("process_virtual_memory_bytes [1-9]"), |
|||
regexp.MustCompile("process_resident_memory_bytes [1-9]"), |
|||
regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), |
|||
regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), |
|||
regexp.MustCompile("foobar_process_max_fds [1-9]"), |
|||
regexp.MustCompile("foobar_process_open_fds [1-9]"), |
|||
regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), |
|||
regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), |
|||
regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), |
|||
} { |
|||
if !re.Match(buf.Bytes()) { |
|||
t.Errorf("want body to match %s\n%s", re, buf.String()) |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,201 @@ |
|||
// Copyright 2016 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Copyright (c) 2013, The Prometheus Authors
|
|||
// All rights reserved.
|
|||
//
|
|||
// Use of this source code is governed by a BSD-style license that can be found
|
|||
// in the LICENSE file.
|
|||
|
|||
// Package promhttp contains functions to create http.Handler instances to
|
|||
// expose Prometheus metrics via HTTP. In later versions of this package, it
|
|||
// will also contain tooling to instrument instances of http.Handler and
|
|||
// http.RoundTripper.
|
|||
//
|
|||
// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
|
|||
// you can create a handler for a custom registry or anything that implements
|
|||
// the Gatherer interface. It also allows to create handlers that act
|
|||
// differently on errors or allow to log errors.
|
|||
package promhttp |
|||
|
|||
import ( |
|||
"bytes" |
|||
"compress/gzip" |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
"strings" |
|||
"sync" |
|||
|
|||
"github.com/prometheus/common/expfmt" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
// HTTP header names used when serving the metrics response.
const (
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)
|||
|
|||
// bufPool recycles response buffers between requests to avoid re-allocating
// them on every scrape. It only ever holds *bytes.Buffer values.
var bufPool sync.Pool

// getBuf returns an empty buffer from the pool, allocating one if the pool
// has none available.
func getBuf() *bytes.Buffer {
	if b, ok := bufPool.Get().(*bytes.Buffer); ok {
		return b
	}
	return &bytes.Buffer{}
}

// giveBuf resets buf and returns it to the pool for reuse.
func giveBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}
|||
|
|||
// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
// error, no error logging, and compression if requested by the client.
//
// If you want to create a Handler for the DefaultGatherer with different
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
// your desired HandlerOpts.
func Handler() http.Handler {
	// Zero-value HandlerOpts: HTTPErrorOnError, no ErrorLog, compression on.
	return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
}
|||
|
|||
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
|
|||
// of the Handler is defined by the provided HandlerOpts.
|
|||
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { |
|||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { |
|||
mfs, err := reg.Gather() |
|||
if err != nil { |
|||
if opts.ErrorLog != nil { |
|||
opts.ErrorLog.Println("error gathering metrics:", err) |
|||
} |
|||
switch opts.ErrorHandling { |
|||
case PanicOnError: |
|||
panic(err) |
|||
case ContinueOnError: |
|||
if len(mfs) == 0 { |
|||
http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
case HTTPErrorOnError: |
|||
http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
} |
|||
|
|||
contentType := expfmt.Negotiate(req.Header) |
|||
buf := getBuf() |
|||
defer giveBuf(buf) |
|||
writer, encoding := decorateWriter(req, buf, opts.DisableCompression) |
|||
enc := expfmt.NewEncoder(writer, contentType) |
|||
var lastErr error |
|||
for _, mf := range mfs { |
|||
if err := enc.Encode(mf); err != nil { |
|||
lastErr = err |
|||
if opts.ErrorLog != nil { |
|||
opts.ErrorLog.Println("error encoding metric family:", err) |
|||
} |
|||
switch opts.ErrorHandling { |
|||
case PanicOnError: |
|||
panic(err) |
|||
case ContinueOnError: |
|||
// Handled later.
|
|||
case HTTPErrorOnError: |
|||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
} |
|||
} |
|||
if closer, ok := writer.(io.Closer); ok { |
|||
closer.Close() |
|||
} |
|||
if lastErr != nil && buf.Len() == 0 { |
|||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) |
|||
return |
|||
} |
|||
header := w.Header() |
|||
header.Set(contentTypeHeader, string(contentType)) |
|||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) |
|||
if encoding != "" { |
|||
header.Set(contentEncodingHeader, encoding) |
|||
} |
|||
w.Write(buf.Bytes()) |
|||
// TODO(beorn7): Consider streaming serving of metrics.
|
|||
}) |
|||
} |
|||
|
|||
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered. HTTPErrorOnError is the zero value and therefore
// the default for a zero HandlerOpts.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
	// last error message in the body. Only use this in deliberate "best
	// effort" metrics collection scenarios. It is recommended to at least
	// log errors (by providing an ErrorLog in HandlerOpts) to not mask
	// errors completely.
	ContinueOnError
	// Panic upon the first error encountered (useful for "crash only" apps).
	PanicOnError
)
|||
|
|||
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	// Println logs the given values in the manner of fmt.Println.
	Println(v ...interface{})
}
|||
|
|||
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default: HTTPErrorOnError, no
// logging, compression enabled.
type HandlerOpts struct {
	// ErrorLog specifies an optional logger for errors collecting and
	// serving metrics. If nil, errors are not logged at all.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided ErrorLog
	// is not nil.
	ErrorHandling HandlerErrorHandling
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
}
|||
|
|||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
|||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
|||
// (which is empty if no compression is enabled).
|
|||
func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { |
|||
if compressionDisabled { |
|||
return writer, "" |
|||
} |
|||
header := request.Header.Get(acceptEncodingHeader) |
|||
parts := strings.Split(header, ",") |
|||
for _, part := range parts { |
|||
part := strings.TrimSpace(part) |
|||
if part == "gzip" || strings.HasPrefix(part, "gzip;") { |
|||
return gzip.NewWriter(writer), "gzip" |
|||
} |
|||
} |
|||
return writer, "" |
|||
} |
|||
@ -0,0 +1,137 @@ |
|||
// Copyright 2016 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Copyright (c) 2013, The Prometheus Authors
|
|||
// All rights reserved.
|
|||
//
|
|||
// Use of this source code is governed by a BSD-style license that can be found
|
|||
// in the LICENSE file.
|
|||
|
|||
package promhttp |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"log" |
|||
"net/http" |
|||
"net/http/httptest" |
|||
"testing" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
type errorCollector struct{} |
|||
|
|||
func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { |
|||
ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) |
|||
} |
|||
|
|||
func (e errorCollector) Collect(ch chan<- prometheus.Metric) { |
|||
ch <- prometheus.NewInvalidMetric( |
|||
prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), |
|||
errors.New("collect error"), |
|||
) |
|||
} |
|||
|
|||
// TestHandlerErrorHandling exercises the three HandlerErrorHandling modes
// against a registry whose gather always fails (via errorCollector):
// HTTPErrorOnError must return 500 with the error body, ContinueOnError must
// return 200 with the successfully gathered metrics, and PanicOnError must
// panic. The log output is checked in all non-panicking cases.
func TestHandlerErrorHandling(t *testing.T) {

	// Create a registry that collects a MetricFamily with two elements,
	// another with one, and reports an error.
	reg := prometheus.NewRegistry()

	cnt := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "the_count",
		Help: "Ah-ah-ah! Thunder and lightning!",
	})
	reg.MustRegister(cnt)

	cntVec := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name:        "name",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
		},
		[]string{"labelname"},
	)
	cntVec.WithLabelValues("val1").Inc()
	cntVec.WithLabelValues("val2").Inc()
	reg.MustRegister(cntVec)

	reg.MustRegister(errorCollector{})

	logBuf := &bytes.Buffer{}
	logger := log.New(logBuf, "", 0)

	writer := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/", nil)
	request.Header.Add("Accept", "test/plain")

	errorHandler := HandlerFor(reg, HandlerOpts{
		ErrorLog:      logger,
		ErrorHandling: HTTPErrorOnError,
	})
	continueHandler := HandlerFor(reg, HandlerOpts{
		ErrorLog:      logger,
		ErrorHandling: ContinueOnError,
	})
	panicHandler := HandlerFor(reg, HandlerOpts{
		ErrorLog:      logger,
		ErrorHandling: PanicOnError,
	})
	// Expected log line and bodies. The raw strings end in a newline on
	// purpose; they must match the handler output byte for byte.
	wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
	wantErrorBody := `An error has occurred during metrics gathering:

error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
`
	wantOKBody := `# HELP name docstring
# TYPE name counter
name{constname="constvalue",labelname="val1"} 1
name{constname="constvalue",labelname="val2"} 1
# HELP the_count Ah-ah-ah! Thunder and lightning!
# TYPE the_count counter
the_count 0
`

	errorHandler.ServeHTTP(writer, request)
	if got, want := writer.Code, http.StatusInternalServerError; got != want {
		t.Errorf("got HTTP status code %d, want %d", got, want)
	}
	if got := logBuf.String(); got != wantMsg {
		t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
	}
	if got := writer.Body.String(); got != wantErrorBody {
		t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
	}
	// Reset the recorder and the log buffer for the next handler.
	logBuf.Reset()
	writer.Body.Reset()
	writer.Code = http.StatusOK

	continueHandler.ServeHTTP(writer, request)
	if got, want := writer.Code, http.StatusOK; got != want {
		t.Errorf("got HTTP status code %d, want %d", got, want)
	}
	if got := logBuf.String(); got != wantMsg {
		t.Errorf("got log message %q, want %q", got, wantMsg)
	}
	if got := writer.Body.String(); got != wantOKBody {
		t.Errorf("got body %q, want %q", got, wantOKBody)
	}

	// PanicOnError must panic; recover to turn "no panic" into a failure.
	defer func() {
		if err := recover(); err == nil {
			t.Error("expected panic from panicHandler")
		}
	}()
	panicHandler.ServeHTTP(writer, request)
}
|||
@ -0,0 +1,56 @@ |
|||
// Copyright 2016 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package push_test |
|||
|
|||
import ( |
|||
"fmt" |
|||
"time" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
"github.com/prometheus/client_golang/prometheus/push" |
|||
) |
|||
|
|||
func ExampleCollectors() { |
|||
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ |
|||
Name: "db_backup_last_completion_timestamp_seconds", |
|||
Help: "The timestamp of the last successful completion of a DB backup.", |
|||
}) |
|||
completionTime.Set(float64(time.Now().Unix())) |
|||
if err := push.Collectors( |
|||
"db_backup", push.HostnameGroupingKey(), |
|||
"http://pushgateway:9091", |
|||
completionTime, |
|||
); err != nil { |
|||
fmt.Println("Could not push completion time to Pushgateway:", err) |
|||
} |
|||
} |
|||
|
|||
func ExampleFromGatherer() { |
|||
registry := prometheus.NewRegistry() |
|||
|
|||
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ |
|||
Name: "db_backup_last_completion_timestamp_seconds", |
|||
Help: "The timestamp of the last successful completion of a DB backup.", |
|||
}) |
|||
registry.MustRegister(completionTime) |
|||
|
|||
completionTime.Set(float64(time.Now().Unix())) |
|||
if err := push.FromGatherer( |
|||
"db_backup", push.HostnameGroupingKey(), |
|||
"http://pushgateway:9091", |
|||
registry, |
|||
); err != nil { |
|||
fmt.Println("Could not push completion time to Pushgateway:", err) |
|||
} |
|||
} |
|||
@ -0,0 +1,172 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Copyright (c) 2013, The Prometheus Authors
|
|||
// All rights reserved.
|
|||
//
|
|||
// Use of this source code is governed by a BSD-style license that can be found
|
|||
// in the LICENSE file.
|
|||
|
|||
// Package push provides functions to push metrics to a Pushgateway. The metrics
|
|||
// to push are either collected from a provided registry, or from explicitly
|
|||
// listed collectors.
|
|||
//
|
|||
// See the documentation of the Pushgateway to understand the meaning of the
|
|||
// grouping parameters and the differences between push.Registry and
|
|||
// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors
|
|||
// on the other hand: https://github.com/prometheus/pushgateway
|
|||
package push |
|||
|
|||
import ( |
|||
"bytes" |
|||
"fmt" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/url" |
|||
"os" |
|||
"strings" |
|||
|
|||
"github.com/prometheus/common/expfmt" |
|||
"github.com/prometheus/common/model" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
// contentTypeHeader is the HTTP header used to declare the push payload format.
const contentTypeHeader = "Content-Type"
|||
|
|||
// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the schema in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
	// PUT replaces the whole metric group on the Pushgateway.
	return push(job, grouping, url, g, "PUT")
}
|||
|
|||
// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
	// POST merges with previously pushed metrics instead of replacing them.
	return push(job, grouping, url, g, "POST")
}
|||
|
|||
func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { |
|||
if !strings.Contains(pushURL, "://") { |
|||
pushURL = "http://" + pushURL |
|||
} |
|||
if strings.HasSuffix(pushURL, "/") { |
|||
pushURL = pushURL[:len(pushURL)-1] |
|||
} |
|||
|
|||
if strings.Contains(job, "/") { |
|||
return fmt.Errorf("job contains '/': %s", job) |
|||
} |
|||
urlComponents := []string{url.QueryEscape(job)} |
|||
for ln, lv := range grouping { |
|||
if !model.LabelNameRE.MatchString(ln) { |
|||
return fmt.Errorf("grouping label has invalid name: %s", ln) |
|||
} |
|||
if strings.Contains(lv, "/") { |
|||
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) |
|||
} |
|||
urlComponents = append(urlComponents, ln, lv) |
|||
} |
|||
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) |
|||
|
|||
mfs, err := g.Gather() |
|||
if err != nil { |
|||
return err |
|||
} |
|||
buf := &bytes.Buffer{} |
|||
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) |
|||
// Check for pre-existing grouping labels:
|
|||
for _, mf := range mfs { |
|||
for _, m := range mf.GetMetric() { |
|||
for _, l := range m.GetLabel() { |
|||
if l.GetName() == "job" { |
|||
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) |
|||
} |
|||
if _, ok := grouping[l.GetName()]; ok { |
|||
return fmt.Errorf( |
|||
"pushed metric %s (%s) already contains grouping label %s", |
|||
mf.GetName(), m, l.GetName(), |
|||
) |
|||
} |
|||
} |
|||
} |
|||
enc.Encode(mf) |
|||
} |
|||
req, err := http.NewRequest(method, pushURL, buf) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) |
|||
resp, err := http.DefaultClient.Do(req) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
defer resp.Body.Close() |
|||
if resp.StatusCode != 202 { |
|||
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
|
|||
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
	// PUT replaces the whole metric group on the Pushgateway.
	return pushCollectors(job, grouping, url, "PUT", collectors...)
}
|||
|
|||
// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
	// POST merges with previously pushed metrics instead of replacing them.
	return pushCollectors(job, grouping, url, "POST", collectors...)
}
|||
|
|||
func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { |
|||
r := prometheus.NewRegistry() |
|||
for _, collector := range collectors { |
|||
if err := r.Register(collector); err != nil { |
|||
return err |
|||
} |
|||
} |
|||
return push(job, grouping, url, r, method) |
|||
} |
|||
|
|||
// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
	// Fall back to "unknown" if the hostname cannot be determined.
	instance := "unknown"
	if hostname, err := os.Hostname(); err == nil {
		instance = hostname
	}
	return map[string]string{"instance": instance}
}
|||
@ -0,0 +1,176 @@ |
|||
// Copyright 2016 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Copyright (c) 2013, The Prometheus Authors
|
|||
// All rights reserved.
|
|||
//
|
|||
// Use of this source code is governed by a BSD-style license that can be found
|
|||
// in the LICENSE file.
|
|||
|
|||
package push |
|||
|
|||
import ( |
|||
"bytes" |
|||
"io/ioutil" |
|||
"net/http" |
|||
"net/http/httptest" |
|||
"os" |
|||
"testing" |
|||
|
|||
"github.com/prometheus/common/expfmt" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
) |
|||
|
|||
func TestPush(t *testing.T) { |
|||
|
|||
var ( |
|||
lastMethod string |
|||
lastBody []byte |
|||
lastPath string |
|||
) |
|||
|
|||
host, err := os.Hostname() |
|||
if err != nil { |
|||
t.Error(err) |
|||
} |
|||
|
|||
// Fake a Pushgateway that always responds with 202.
|
|||
pgwOK := httptest.NewServer( |
|||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
|||
lastMethod = r.Method |
|||
var err error |
|||
lastBody, err = ioutil.ReadAll(r.Body) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
lastPath = r.URL.EscapedPath() |
|||
w.Header().Set("Content-Type", `text/plain; charset=utf-8`) |
|||
w.WriteHeader(http.StatusAccepted) |
|||
}), |
|||
) |
|||
defer pgwOK.Close() |
|||
|
|||
// Fake a Pushgateway that always responds with 500.
|
|||
pgwErr := httptest.NewServer( |
|||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { |
|||
http.Error(w, "fake error", http.StatusInternalServerError) |
|||
}), |
|||
) |
|||
defer pgwErr.Close() |
|||
|
|||
metric1 := prometheus.NewCounter(prometheus.CounterOpts{ |
|||
Name: "testname1", |
|||
Help: "testhelp1", |
|||
}) |
|||
metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ |
|||
Name: "testname2", |
|||
Help: "testhelp2", |
|||
ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, |
|||
}) |
|||
|
|||
reg := prometheus.NewRegistry() |
|||
reg.MustRegister(metric1) |
|||
reg.MustRegister(metric2) |
|||
|
|||
mfs, err := reg.Gather() |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
|
|||
buf := &bytes.Buffer{} |
|||
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) |
|||
|
|||
for _, mf := range mfs { |
|||
if err := enc.Encode(mf); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
} |
|||
wantBody := buf.Bytes() |
|||
|
|||
// PushCollectors, all good.
|
|||
if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if lastMethod != "PUT" { |
|||
t.Error("want method PUT for PushCollectors, got", lastMethod) |
|||
} |
|||
if bytes.Compare(lastBody, wantBody) != 0 { |
|||
t.Errorf("got body %v, want %v", lastBody, wantBody) |
|||
} |
|||
if lastPath != "/metrics/job/testjob/instance/"+host { |
|||
t.Error("unexpected path:", lastPath) |
|||
} |
|||
|
|||
// PushAddCollectors, with nil grouping, all good.
|
|||
if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if lastMethod != "POST" { |
|||
t.Error("want method POST for PushAddCollectors, got", lastMethod) |
|||
} |
|||
if bytes.Compare(lastBody, wantBody) != 0 { |
|||
t.Errorf("got body %v, want %v", lastBody, wantBody) |
|||
} |
|||
if lastPath != "/metrics/job/testjob" { |
|||
t.Error("unexpected path:", lastPath) |
|||
} |
|||
|
|||
// PushCollectors with a broken PGW.
|
|||
if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil { |
|||
t.Error("push to broken Pushgateway succeeded") |
|||
} else { |
|||
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { |
|||
t.Errorf("got error %q, want %q", got, want) |
|||
} |
|||
} |
|||
|
|||
// PushCollectors with invalid grouping or job.
|
|||
if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil { |
|||
t.Error("push with grouping contained in metrics succeeded") |
|||
} |
|||
if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil { |
|||
t.Error("push with invalid job value succeeded") |
|||
} |
|||
if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { |
|||
t.Error("push with invalid grouping succeeded") |
|||
} |
|||
if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { |
|||
t.Error("push with invalid grouping succeeded") |
|||
} |
|||
|
|||
// Push registry, all good.
|
|||
if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if lastMethod != "PUT" { |
|||
t.Error("want method PUT for Push, got", lastMethod) |
|||
} |
|||
if bytes.Compare(lastBody, wantBody) != 0 { |
|||
t.Errorf("got body %v, want %v", lastBody, wantBody) |
|||
} |
|||
|
|||
// PushAdd registry, all good.
|
|||
if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if lastMethod != "POST" { |
|||
t.Error("want method POSTT for PushAdd, got", lastMethod) |
|||
} |
|||
if bytes.Compare(lastBody, wantBody) != 0 { |
|||
t.Errorf("got body %v, want %v", lastBody, wantBody) |
|||
} |
|||
if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { |
|||
t.Error("unexpected path:", lastPath) |
|||
} |
|||
} |
|||
@ -0,0 +1,806 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"bytes" |
|||
"errors" |
|||
"fmt" |
|||
"os" |
|||
"sort" |
|||
"sync" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
const (
	// capMetricChan is the capacity of the channel Gather uses to collect
	// metrics from the registered Collectors.
	capMetricChan = 1000
	// capDescChan is the capacity of the channel Register/Unregister use
	// to collect descriptors from a Collector's Describe method.
	capDescChan = 10
)
|||
|
|||
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (see NewProcessCollector) and a Go collector (see
// NewGoCollector) already registered. This approach to keep default instances
// as global state mirrors the approach of other packages in the Go standard
// library. Note that there are caveats. Change the variables with caution and
// only if you understand the consequences. Users who want to avoid global
// state altogether should not use the convenience functions and act on custom
// instances instead.
var (
	defaultRegistry              = NewRegistry()
	DefaultRegisterer Registerer = defaultRegistry
	DefaultGatherer   Gatherer   = defaultRegistry
)
|||
|
|||
// init pre-registers the process collector and the Go runtime collector with
// the DefaultRegisterer, so standard process/runtime metrics are exposed out
// of the box.
func init() {
	MustRegister(NewProcessCollector(os.Getpid(), ""))
	MustRegister(NewGoCollector())
}
|||
|
|||
// NewRegistry creates a new vanilla Registry without any Collectors
|
|||
// pre-registered.
|
|||
func NewRegistry() *Registry { |
|||
return &Registry{ |
|||
collectorsByID: map[uint64]Collector{}, |
|||
descIDs: map[uint64]struct{}{}, |
|||
dimHashesByName: map[string]uint64{}, |
|||
} |
|||
} |
|||
|
|||
// NewPedanticRegistry returns a registry that checks during collection if each
|
|||
// collected Metric is consistent with its reported Desc, and if the Desc has
|
|||
// actually been registered with the registry.
|
|||
//
|
|||
// Usually, a Registry will be happy as long as the union of all collected
|
|||
// Metrics is consistent and valid even if some metrics are not consistent with
|
|||
// their own Desc or a Desc provided by their registered Collector. Well-behaved
|
|||
// Collectors and Metrics will only provide consistent Descs. This Registry is
|
|||
// useful to test the implementation of Collectors and Metrics.
|
|||
func NewPedanticRegistry() *Registry { |
|||
r := NewRegistry() |
|||
r.pedanticChecksEnabled = true |
|||
return r |
|||
} |
|||
|
|||
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer
// implementations (e.g. for testing purposes).
type Registerer interface {
	// Register registers a new Collector to be included in metrics
	// collection. It returns an error if the descriptors provided by the
	// Collector are invalid or if they — in combination with descriptors of
	// already registered Collectors — do not fulfill the consistency and
	// uniqueness criteria described in the documentation of metric.Desc.
	//
	// If the provided Collector is equal to a Collector already registered
	// (which includes the case of re-registering the same Collector), the
	// returned error is an instance of AlreadyRegisteredError, which
	// contains the previously registered Collector.
	//
	// It is in general not safe to register the same Collector multiple
	// times concurrently.
	Register(Collector) error
	// MustRegister works like Register but registers any number of
	// Collectors and panics upon the first registration that causes an
	// error.
	MustRegister(...Collector)
	// Unregister unregisters the Collector that equals the Collector passed
	// in as an argument. (Two Collectors are considered equal if their
	// Describe method yields the same set of descriptors.) The function
	// returns whether a Collector was unregistered.
	//
	// Note that even after unregistering, it will not be possible to
	// register a new Collector that is inconsistent with the unregistered
	// Collector, e.g. a Collector collecting metrics with the same name but
	// a different help string. The rationale here is that the same registry
	// instance must only collect consistent metrics throughout its
	// lifetime.
	Unregister(Collector) bool
}
|||
|
|||
// Gatherer is the interface for the part of a registry in charge of gathering
// the collected metrics into a number of MetricFamilies. The Gatherer interface
// comes with the same general implication as described for the Registerer
// interface.
type Gatherer interface {
	// Gather calls the Collect method of the registered Collectors and then
	// gathers the collected metrics into a lexicographically sorted slice
	// of MetricFamily protobufs. Even if an error occurs, Gather attempts
	// to gather as many metrics as possible. Hence, if a non-nil error is
	// returned, the returned MetricFamily slice could be nil (in case of a
	// fatal error that prevented any meaningful metric collection) or
	// contain a number of MetricFamily protobufs, some of which might be
	// incomplete, and some might be missing altogether. The returned error
	// (which might be a MultiError) explains the details. In scenarios
	// where complete collection is critical, the returned MetricFamily
	// protobufs should be disregarded if the returned error is non-nil.
	Gather() ([]*dto.MetricFamily, error)
}
|||
|
|||
// Register registers the provided Collector with the DefaultRegisterer.
|
|||
//
|
|||
// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
|
|||
// details.
|
|||
func Register(c Collector) error { |
|||
return DefaultRegisterer.Register(c) |
|||
} |
|||
|
|||
// MustRegister registers the provided Collectors with the DefaultRegisterer and
|
|||
// panics if any error occurs.
|
|||
//
|
|||
// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
|
|||
// there for more details.
|
|||
func MustRegister(cs ...Collector) { |
|||
DefaultRegisterer.MustRegister(cs...) |
|||
} |
|||
|
|||
// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
|
|||
// returns the Collector, unless an equal Collector was registered before, in
|
|||
// which case that Collector is returned.
|
|||
//
|
|||
// Deprecated: RegisterOrGet is merely a convenience function for the
|
|||
// implementation as described in the documentation for
|
|||
// AlreadyRegisteredError. As the use case is relatively rare, this function
|
|||
// will be removed in a future version of this package to clean up the
|
|||
// namespace.
|
|||
func RegisterOrGet(c Collector) (Collector, error) { |
|||
if err := Register(c); err != nil { |
|||
if are, ok := err.(AlreadyRegisteredError); ok { |
|||
return are.ExistingCollector, nil |
|||
} |
|||
return nil, err |
|||
} |
|||
return c, nil |
|||
} |
|||
|
|||
// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
|
|||
// an error.
|
|||
//
|
|||
// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
|
|||
// there for details.
|
|||
func MustRegisterOrGet(c Collector) Collector { |
|||
c, err := RegisterOrGet(c) |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
return c |
|||
} |
|||
|
|||
// Unregister removes the registration of the provided Collector from the
|
|||
// DefaultRegisterer.
|
|||
//
|
|||
// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
|
|||
// more details.
|
|||
func Unregister(c Collector) bool { |
|||
return DefaultRegisterer.Unregister(c) |
|||
} |
|||
|
|||
// GathererFunc turns a function into a Gatherer, in the same spirit as
// http.HandlerFunc turns a function into an http.Handler.
type GathererFunc func() ([]*dto.MetricFamily, error)

// Gather implements Gatherer by simply calling the function itself.
func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
	return gf()
}
|||
|
|||
// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
|
|||
// gathers from the previous DefaultGatherers but then merges the MetricFamily
|
|||
// protobufs returned from the provided hook function with the MetricFamily
|
|||
// protobufs returned from the original DefaultGatherer.
|
|||
//
|
|||
// Deprecated: This function manipulates the DefaultGatherer variable. Consider
|
|||
// the implications, i.e. don't do this concurrently with any uses of the
|
|||
// DefaultGatherer. In the rare cases where you need to inject MetricFamily
|
|||
// protobufs directly, it is recommended to use a custom Registry and combine it
|
|||
// with a custom Gatherer using the Gatherers type (see
|
|||
// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
|
|||
// with previous versions of this package.
|
|||
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { |
|||
DefaultGatherer = Gatherers{ |
|||
DefaultGatherer, |
|||
GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }), |
|||
} |
|||
} |
|||
|
|||
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
// in that case, but you can detect from the kind of error what has
// happened. The error contains fields for the existing Collector and the
// (rejected) new Collector that equals the existing one. This can be used to
// find out if an equal Collector has been registered before and switch over to
// using the old one, as demonstrated in the example.
type AlreadyRegisteredError struct {
	ExistingCollector, NewCollector Collector
}

// Error implements the error interface with a fixed message; the interesting
// details live in the struct fields.
func (err AlreadyRegisteredError) Error() string {
	return "duplicate metrics collector registration attempted"
}
|||
|
|||
// MultiError is a slice of errors implementing the error interface. It is used
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error

// Error renders the contained errors as a count header followed by one
// "* <error>" line per error. An empty MultiError renders as "".
func (errs MultiError) Error() string {
	if len(errs) == 0 {
		return ""
	}
	var b bytes.Buffer
	fmt.Fprintf(&b, "%d error(s) occurred:", len(errs))
	for _, e := range errs {
		fmt.Fprintf(&b, "\n* %s", e)
	}
	return b.String()
}

// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
// contained error as error if len(errs) is 1. In all other cases, it returns
// the MultiError directly. This is helpful for returning a MultiError in a way
// that only uses the MultiError if needed.
func (errs MultiError) MaybeUnwrap() error {
	if len(errs) == 0 {
		return nil
	}
	if len(errs) == 1 {
		return errs[0]
	}
	return errs
}
|||
|
|||
// Registry registers Prometheus collectors, collects their metrics, and gathers
// them into MetricFamilies for exposition. It implements both Registerer and
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
// NewPedanticRegistry.
type Registry struct {
	mtx                   sync.RWMutex
	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
	descIDs               map[uint64]struct{}  // Set of IDs of all registered descriptors.
	dimHashesByName       map[string]uint64    // fqName -> hash of label names/help ("dimensions").
	pedanticChecksEnabled bool                 // Set by NewPedanticRegistry; adds per-metric checks in Gather.
}
|||
|
|||
// Register implements Registerer. It collects all descriptors from the
// Collector's Describe method, validates them against the already registered
// state, and only registers the Collector if every check passes. The Collector
// is keyed by the sum of its (deduplicated) descriptor IDs.
func (r *Registry) Register(c Collector) error {
	var (
		descChan           = make(chan *Desc, capDescChan)
		newDescIDs         = map[uint64]struct{}{}
		newDimHashesByName = map[string]uint64{}
		collectorID        uint64 // Just a sum of all desc IDs.
		duplicateDescErr   error
	)
	// Describe runs concurrently; the channel is closed once it is done so
	// the range loop below terminates.
	go func() {
		c.Describe(descChan)
		close(descChan)
	}()
	r.mtx.Lock()
	defer r.mtx.Unlock()
	// Conduct various tests...
	for desc := range descChan {

		// Is the descriptor valid at all?
		if desc.err != nil {
			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
		}

		// Is the descID unique?
		// (In other words: Is the fqName + constLabel combination unique?)
		if _, exists := r.descIDs[desc.id]; exists {
			// Not returned immediately: a duplicate desc is only fatal
			// if the collector as a whole turns out to be new (checked
			// after the loop).
			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
		}
		// If it is not a duplicate desc in this collector, add it to
		// the collectorID. (We allow duplicate descs within the same
		// collector, but their existence must be a no-op.)
		if _, exists := newDescIDs[desc.id]; !exists {
			newDescIDs[desc.id] = struct{}{}
			collectorID += desc.id
		}

		// Are all the label names and the help string consistent with
		// previous descriptors of the same name?
		// First check existing descriptors...
		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
			if dimHash != desc.dimHash {
				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
			}
		} else {
			// ...then check the new descriptors already seen.
			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
				if dimHash != desc.dimHash {
					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
				}
			} else {
				newDimHashesByName[desc.fqName] = desc.dimHash
			}
		}
	}
	// Did anything happen at all?
	if len(newDescIDs) == 0 {
		return errors.New("collector has no descriptors")
	}
	if existing, exists := r.collectorsByID[collectorID]; exists {
		return AlreadyRegisteredError{
			ExistingCollector: existing,
			NewCollector:      c,
		}
	}
	// If the collectorID is new, but at least one of the descs existed
	// before, we are in trouble.
	if duplicateDescErr != nil {
		return duplicateDescErr
	}

	// Only after all tests have passed, actually register.
	r.collectorsByID[collectorID] = c
	for hash := range newDescIDs {
		r.descIDs[hash] = struct{}{}
	}
	for name, dimHash := range newDimHashesByName {
		r.dimHashesByName[name] = dimHash
	}
	return nil
}
|||
|
|||
// Unregister implements Registerer. It recomputes the collector ID from the
// Collector's Describe output and removes the matching registration, if any.
func (r *Registry) Unregister(c Collector) bool {
	var (
		descChan    = make(chan *Desc, capDescChan)
		descIDs     = map[uint64]struct{}{}
		collectorID uint64 // Just a sum of the desc IDs.
	)
	go func() {
		c.Describe(descChan)
		close(descChan)
	}()
	// Sum deduplicated desc IDs — must mirror the ID computation in
	// Register so an equal Collector maps to the same collectorID.
	for desc := range descChan {
		if _, exists := descIDs[desc.id]; !exists {
			collectorID += desc.id
			descIDs[desc.id] = struct{}{}
		}
	}

	// Cheap existence check under the read lock first.
	// NOTE(review): the registration could in principle change between the
	// RUnlock below and the Lock that follows — presumably acceptable here;
	// confirm if stricter atomicity is ever required.
	r.mtx.RLock()
	if _, exists := r.collectorsByID[collectorID]; !exists {
		r.mtx.RUnlock()
		return false
	}
	r.mtx.RUnlock()

	r.mtx.Lock()
	defer r.mtx.Unlock()

	delete(r.collectorsByID, collectorID)
	for id := range descIDs {
		delete(r.descIDs, id)
	}
	// dimHashesByName is left untouched as those must be consistent
	// throughout the lifetime of a program.
	return true
}
|||
|
|||
// MustRegister implements Registerer.
|
|||
func (r *Registry) MustRegister(cs ...Collector) { |
|||
for _, c := range cs { |
|||
if err := r.Register(c); err != nil { |
|||
panic(err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Gather implements Gatherer. It runs all registered Collectors concurrently,
// converts each collected Metric into a dto.Metric, groups them into
// MetricFamily protobufs by fully-qualified name, checks consistency, and
// returns the families sorted by name together with any accumulated errors
// (unwrapped via MultiError.MaybeUnwrap).
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
	var (
		metricChan        = make(chan Metric, capMetricChan)
		metricHashes      = map[uint64]struct{}{}
		dimHashes         = map[string]uint64{}
		wg                sync.WaitGroup
		errs              MultiError          // The collected errors to return in the end.
		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
	)

	r.mtx.RLock()
	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))

	// Scatter.
	// (Collectors could be complex and slow, so we call them all at once.)
	wg.Add(len(r.collectorsByID))
	// Close metricChan once every Collect goroutine has finished, so the
	// gathering loop below terminates.
	go func() {
		wg.Wait()
		close(metricChan)
	}()
	for _, collector := range r.collectorsByID {
		go func(collector Collector) {
			defer wg.Done()
			collector.Collect(metricChan)
		}(collector)
	}

	// In case pedantic checks are enabled, we have to copy the map before
	// giving up the RLock.
	if r.pedanticChecksEnabled {
		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
		for id := range r.descIDs {
			registeredDescIDs[id] = struct{}{}
		}
	}

	r.mtx.RUnlock()

	// Drain metricChan in case of premature return, so the Collect
	// goroutines above never block on a send and leak.
	defer func() {
		for range metricChan {
		}
	}()

	// Gather.
	for metric := range metricChan {
		// This could be done concurrently, too, but it required locking
		// of metricFamiliesByName (and of metricHashes if checks are
		// enabled). Most likely not worth it.
		desc := metric.Desc()
		dtoMetric := &dto.Metric{}
		if err := metric.Write(dtoMetric); err != nil {
			errs = append(errs, fmt.Errorf(
				"error collecting metric %v: %s", desc, err,
			))
			continue
		}
		metricFamily, ok := metricFamiliesByName[desc.fqName]
		if ok {
			// An existing family: the metric must agree with it in
			// help string and metric type.
			if metricFamily.GetHelp() != desc.help {
				errs = append(errs, fmt.Errorf(
					"collected metric %s %s has help %q but should have %q",
					desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
				))
				continue
			}
			// TODO(beorn7): Simplify switch once Desc has type.
			switch metricFamily.GetType() {
			case dto.MetricType_COUNTER:
				if dtoMetric.Counter == nil {
					errs = append(errs, fmt.Errorf(
						"collected metric %s %s should be a Counter",
						desc.fqName, dtoMetric,
					))
					continue
				}
			case dto.MetricType_GAUGE:
				if dtoMetric.Gauge == nil {
					errs = append(errs, fmt.Errorf(
						"collected metric %s %s should be a Gauge",
						desc.fqName, dtoMetric,
					))
					continue
				}
			case dto.MetricType_SUMMARY:
				if dtoMetric.Summary == nil {
					errs = append(errs, fmt.Errorf(
						"collected metric %s %s should be a Summary",
						desc.fqName, dtoMetric,
					))
					continue
				}
			case dto.MetricType_UNTYPED:
				if dtoMetric.Untyped == nil {
					errs = append(errs, fmt.Errorf(
						"collected metric %s %s should be Untyped",
						desc.fqName, dtoMetric,
					))
					continue
				}
			case dto.MetricType_HISTOGRAM:
				if dtoMetric.Histogram == nil {
					errs = append(errs, fmt.Errorf(
						"collected metric %s %s should be a Histogram",
						desc.fqName, dtoMetric,
					))
					continue
				}
			default:
				panic("encountered MetricFamily with invalid type")
			}
		} else {
			// First metric of this name: create the family and infer
			// its type from which field of the dto.Metric is set.
			metricFamily = &dto.MetricFamily{}
			metricFamily.Name = proto.String(desc.fqName)
			metricFamily.Help = proto.String(desc.help)
			// TODO(beorn7): Simplify switch once Desc has type.
			switch {
			case dtoMetric.Gauge != nil:
				metricFamily.Type = dto.MetricType_GAUGE.Enum()
			case dtoMetric.Counter != nil:
				metricFamily.Type = dto.MetricType_COUNTER.Enum()
			case dtoMetric.Summary != nil:
				metricFamily.Type = dto.MetricType_SUMMARY.Enum()
			case dtoMetric.Untyped != nil:
				metricFamily.Type = dto.MetricType_UNTYPED.Enum()
			case dtoMetric.Histogram != nil:
				metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
			default:
				errs = append(errs, fmt.Errorf(
					"empty metric collected: %s", dtoMetric,
				))
				continue
			}
			metricFamiliesByName[desc.fqName] = metricFamily
		}
		if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
			errs = append(errs, err)
			continue
		}
		if r.pedanticChecksEnabled {
			// Is the desc registered at all?
			if _, exist := registeredDescIDs[desc.id]; !exist {
				errs = append(errs, fmt.Errorf(
					"collected metric %s %s with unregistered descriptor %s",
					metricFamily.GetName(), dtoMetric, desc,
				))
				continue
			}
			if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
				errs = append(errs, err)
				continue
			}
		}
		metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
	}
	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
|||
|
|||
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
// Gatherers can be used to merge the Gather results from multiple
// Registries. It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer

// Gather implements Gatherer. It merges the results of all contained
// Gatherers, prefixing each sub-Gatherer's errors with its (1-based) position
// in the slice.
func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
	var (
		metricFamiliesByName = map[string]*dto.MetricFamily{}
		metricHashes         = map[uint64]struct{}{}
		dimHashes            = map[string]uint64{}
		errs                 MultiError // The collected errors to return in the end.
	)

	for i, g := range gs {
		mfs, err := g.Gather()
		if err != nil {
			// Flatten nested MultiErrors so the result is a single
			// flat MultiError.
			if multiErr, ok := err.(MultiError); ok {
				for _, err := range multiErr {
					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
				}
			} else {
				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
			}
		}
		for _, mf := range mfs {
			existingMF, exists := metricFamiliesByName[mf.GetName()]
			if exists {
				// Same-named family gathered before: help and type
				// must match, else the whole family is skipped.
				if existingMF.GetHelp() != mf.GetHelp() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has help %q but should have %q",
						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
					))
					continue
				}
				if existingMF.GetType() != mf.GetType() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has type %s but should have %s",
						mf.GetName(), mf.GetType(), existingMF.GetType(),
					))
					continue
				}
			} else {
				existingMF = &dto.MetricFamily{}
				existingMF.Name = mf.Name
				existingMF.Help = mf.Help
				existingMF.Type = mf.Type
				metricFamiliesByName[mf.GetName()] = existingMF
			}
			for _, m := range mf.Metric {
				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
					errs = append(errs, err)
					continue
				}
				existingMF.Metric = append(existingMF.Metric, m)
			}
		}
	}
	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
|||
|
|||
// metricSorter is a sortable slice of *dto.Metric, used by
// normalizeMetricFamilies to order the metrics within a MetricFamily.
type metricSorter []*dto.Metric

// Len implements sort.Interface.
func (s metricSorter) Len() int {
	return len(s)
}

// Swap implements sort.Interface.
func (s metricSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
|||
|
|||
// Less implements sort.Interface. It orders metrics by comparing their label
// values positionally, falling back to the timestamp (missing timestamps sort
// last) so the ordering stays stable even for inconsistent metric sets.
func (s metricSorter) Less(i, j int) bool {
	if len(s[i].Label) != len(s[j].Label) {
		// This should not happen. The metrics are
		// inconsistent. However, we have to deal with the fact, as
		// people might use custom collectors or metric family injection
		// to create inconsistent metrics. So let's simply compare the
		// number of labels in this case. That will still yield
		// reproducible sorting.
		return len(s[i].Label) < len(s[j].Label)
	}
	for n, lp := range s[i].Label {
		vi := lp.GetValue()
		vj := s[j].Label[n].GetValue()
		if vi != vj {
			return vi < vj
		}
	}

	// We should never arrive here. Multiple metrics with the same
	// label set in the same scrape will lead to undefined ingestion
	// behavior. However, as above, we have to provide stable sorting
	// here, even for inconsistent metrics. So sort equal metrics
	// by their timestamp, with missing timestamps (implying "now")
	// coming last.
	if s[i].TimestampMs == nil {
		return false
	}
	if s[j].TimestampMs == nil {
		return true
	}
	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
|||
|
|||
// normalizeMetricFamilies returns a MetricFamily slice with empty
|
|||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
|||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
|||
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { |
|||
for _, mf := range metricFamiliesByName { |
|||
sort.Sort(metricSorter(mf.Metric)) |
|||
} |
|||
names := make([]string, 0, len(metricFamiliesByName)) |
|||
for name, mf := range metricFamiliesByName { |
|||
if len(mf.Metric) > 0 { |
|||
names = append(names, name) |
|||
} |
|||
} |
|||
sort.Strings(names) |
|||
result := make([]*dto.MetricFamily, 0, len(names)) |
|||
for _, name := range names { |
|||
result = append(result, metricFamiliesByName[name]) |
|||
} |
|||
return result |
|||
} |
|||
|
|||
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHash is not equal
// to the calculated dimHash.
func checkMetricConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	metricHashes map[uint64]struct{},
	dimHashes map[string]uint64,
) error {
	// Type consistency with metric family: the metric must have the payload
	// field matching the family's declared type.
	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
		return fmt.Errorf(
			"collected metric %s %s is not a %s",
			metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
		)
	}

	// Is the metric unique (i.e. no other metric with the same name and the same label values)?
	// h hashes the family name plus the label values; dh hashes only the
	// label names (the "dimensions" of the family).
	h := hashNew()
	h = hashAdd(h, metricFamily.GetName())
	h = hashAddByte(h, separatorByte)
	dh := hashNew()
	// Make sure label pairs are sorted. We depend on it for the consistency
	// check.
	sort.Sort(LabelPairSorter(dtoMetric.Label))
	for _, lp := range dtoMetric.Label {
		h = hashAdd(h, lp.GetValue())
		h = hashAddByte(h, separatorByte)
		dh = hashAdd(dh, lp.GetName())
		dh = hashAddByte(dh, separatorByte)
	}
	if _, exists := metricHashes[h]; exists {
		return fmt.Errorf(
			"collected metric %s %s was collected before with the same name and label values",
			metricFamily.GetName(), dtoMetric,
		)
	}
	if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
		if dimHash != dh {
			return fmt.Errorf(
				"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
				metricFamily.GetName(), dtoMetric,
			)
		}
	} else {
		dimHashes[metricFamily.GetName()] = dh
	}
	metricHashes[h] = struct{}{}
	return nil
}
|||
|
|||
// checkDescConsistency checks if the provided MetricFamily and Metric are
// consistent with the provided Desc: the help strings must match, and the
// metric's sorted label pairs must match the Desc's const and variable labels
// (for const labels, the values must match as well).
func checkDescConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	desc *Desc,
) error {
	// Desc help consistency with metric family help.
	if metricFamily.GetHelp() != desc.help {
		return fmt.Errorf(
			"collected metric %s %s has help %q but should have %q",
			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
		)
	}

	// Is the desc consistent with the content of the metric?
	lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
	lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
	for _, l := range desc.variableLabels {
		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
			Name: proto.String(l),
		})
	}
	if len(lpsFromDesc) != len(dtoMetric.Label) {
		return fmt.Errorf(
			"labels in collected metric %s %s are inconsistent with descriptor %s",
			metricFamily.GetName(), dtoMetric, desc,
		)
	}
	sort.Sort(LabelPairSorter(lpsFromDesc))
	for i, lpFromDesc := range lpsFromDesc {
		lpFromMetric := dtoMetric.Label[i]
		// Variable labels from the Desc carry a nil Value, so only their
		// names are compared; const labels are compared by value, too.
		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
			return fmt.Errorf(
				"labels in collected metric %s %s are inconsistent with descriptor %s",
				metricFamily.GetName(), dtoMetric, desc,
			)
		}
	}
	return nil
}
|||
@ -0,0 +1,545 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Copyright (c) 2013, The Prometheus Authors
|
|||
// All rights reserved.
|
|||
//
|
|||
// Use of this source code is governed by a BSD-style license that can be found
|
|||
// in the LICENSE file.
|
|||
|
|||
package prometheus_test |
|||
|
|||
import ( |
|||
"bytes" |
|||
"net/http" |
|||
"net/http/httptest" |
|||
"testing" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
"github.com/prometheus/common/expfmt" |
|||
|
|||
"github.com/prometheus/client_golang/prometheus" |
|||
"github.com/prometheus/client_golang/prometheus/promhttp" |
|||
) |
|||
|
|||
func testHandler(t testing.TB) { |
|||
|
|||
metricVec := prometheus.NewCounterVec( |
|||
prometheus.CounterOpts{ |
|||
Name: "name", |
|||
Help: "docstring", |
|||
ConstLabels: prometheus.Labels{"constname": "constvalue"}, |
|||
}, |
|||
[]string{"labelname"}, |
|||
) |
|||
|
|||
metricVec.WithLabelValues("val1").Inc() |
|||
metricVec.WithLabelValues("val2").Inc() |
|||
|
|||
externalMetricFamily := &dto.MetricFamily{ |
|||
Name: proto.String("externalname"), |
|||
Help: proto.String("externaldocstring"), |
|||
Type: dto.MetricType_COUNTER.Enum(), |
|||
Metric: []*dto.Metric{ |
|||
{ |
|||
Label: []*dto.LabelPair{ |
|||
{ |
|||
Name: proto.String("externalconstname"), |
|||
Value: proto.String("externalconstvalue"), |
|||
}, |
|||
{ |
|||
Name: proto.String("externallabelname"), |
|||
Value: proto.String("externalval1"), |
|||
}, |
|||
}, |
|||
Counter: &dto.Counter{ |
|||
Value: proto.Float64(1), |
|||
}, |
|||
}, |
|||
}, |
|||
} |
|||
externalBuf := &bytes.Buffer{} |
|||
enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) |
|||
if err := enc.Encode(externalMetricFamily); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
externalMetricFamilyAsBytes := externalBuf.Bytes() |
|||
externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring |
|||
# TYPE externalname counter |
|||
externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 |
|||
`) |
|||
externalMetricFamilyAsProtoText := []byte(`name: "externalname" |
|||
help: "externaldocstring" |
|||
type: COUNTER |
|||
metric: < |
|||
label: < |
|||
name: "externalconstname" |
|||
value: "externalconstvalue" |
|||
> |
|||
label: < |
|||
name: "externallabelname" |
|||
value: "externalval1" |
|||
> |
|||
counter: < |
|||
value: 1 |
|||
> |
|||
> |
|||
|
|||
`) |
|||
externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > > |
|||
`) |
|||
|
|||
expectedMetricFamily := &dto.MetricFamily{ |
|||
Name: proto.String("name"), |
|||
Help: proto.String("docstring"), |
|||
Type: dto.MetricType_COUNTER.Enum(), |
|||
Metric: []*dto.Metric{ |
|||
{ |
|||
Label: []*dto.LabelPair{ |
|||
{ |
|||
Name: proto.String("constname"), |
|||
Value: proto.String("constvalue"), |
|||
}, |
|||
{ |
|||
Name: proto.String("labelname"), |
|||
Value: proto.String("val1"), |
|||
}, |
|||
}, |
|||
Counter: &dto.Counter{ |
|||
Value: proto.Float64(1), |
|||
}, |
|||
}, |
|||
{ |
|||
Label: []*dto.LabelPair{ |
|||
{ |
|||
Name: proto.String("constname"), |
|||
Value: proto.String("constvalue"), |
|||
}, |
|||
{ |
|||
Name: proto.String("labelname"), |
|||
Value: proto.String("val2"), |
|||
}, |
|||
}, |
|||
Counter: &dto.Counter{ |
|||
Value: proto.Float64(1), |
|||
}, |
|||
}, |
|||
}, |
|||
} |
|||
buf := &bytes.Buffer{} |
|||
enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) |
|||
if err := enc.Encode(expectedMetricFamily); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
expectedMetricFamilyAsBytes := buf.Bytes() |
|||
expectedMetricFamilyAsText := []byte(`# HELP name docstring |
|||
# TYPE name counter |
|||
name{constname="constvalue",labelname="val1"} 1 |
|||
name{constname="constvalue",labelname="val2"} 1 |
|||
`) |
|||
expectedMetricFamilyAsProtoText := []byte(`name: "name" |
|||
help: "docstring" |
|||
type: COUNTER |
|||
metric: < |
|||
label: < |
|||
name: "constname" |
|||
value: "constvalue" |
|||
> |
|||
label: < |
|||
name: "labelname" |
|||
value: "val1" |
|||
> |
|||
counter: < |
|||
value: 1 |
|||
> |
|||
> |
|||
metric: < |
|||
label: < |
|||
name: "constname" |
|||
value: "constvalue" |
|||
> |
|||
label: < |
|||
name: "labelname" |
|||
value: "val2" |
|||
> |
|||
counter: < |
|||
value: 1 |
|||
> |
|||
> |
|||
|
|||
`) |
|||
expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > |
|||
`) |
|||
|
|||
externalMetricFamilyWithSameName := &dto.MetricFamily{ |
|||
Name: proto.String("name"), |
|||
Help: proto.String("docstring"), |
|||
Type: dto.MetricType_COUNTER.Enum(), |
|||
Metric: []*dto.Metric{ |
|||
{ |
|||
Label: []*dto.LabelPair{ |
|||
{ |
|||
Name: proto.String("constname"), |
|||
Value: proto.String("constvalue"), |
|||
}, |
|||
{ |
|||
Name: proto.String("labelname"), |
|||
Value: proto.String("different_val"), |
|||
}, |
|||
}, |
|||
Counter: &dto.Counter{ |
|||
Value: proto.Float64(42), |
|||
}, |
|||
}, |
|||
}, |
|||
} |
|||
|
|||
expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > |
|||
`) |
|||
|
|||
type output struct { |
|||
headers map[string]string |
|||
body []byte |
|||
} |
|||
|
|||
var scenarios = []struct { |
|||
headers map[string]string |
|||
out output |
|||
collector prometheus.Collector |
|||
externalMF []*dto.MetricFamily |
|||
}{ |
|||
{ // 0
|
|||
headers: map[string]string{ |
|||
"Accept": "foo/bar;q=0.2, dings/bums;q=0.8", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: []byte{}, |
|||
}, |
|||
}, |
|||
{ // 1
|
|||
headers: map[string]string{ |
|||
"Accept": "foo/bar;q=0.2, application/quark;q=0.8", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: []byte{}, |
|||
}, |
|||
}, |
|||
{ // 2
|
|||
headers: map[string]string{ |
|||
"Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: []byte{}, |
|||
}, |
|||
}, |
|||
{ // 3
|
|||
headers: map[string]string{ |
|||
"Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, |
|||
}, |
|||
body: []byte{}, |
|||
}, |
|||
}, |
|||
{ // 4
|
|||
headers: map[string]string{ |
|||
"Accept": "application/json", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: expectedMetricFamilyAsText, |
|||
}, |
|||
collector: metricVec, |
|||
}, |
|||
{ // 5
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, |
|||
}, |
|||
body: expectedMetricFamilyAsBytes, |
|||
}, |
|||
collector: metricVec, |
|||
}, |
|||
{ // 6
|
|||
headers: map[string]string{ |
|||
"Accept": "application/json", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: externalMetricFamilyAsText, |
|||
}, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 7
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, |
|||
}, |
|||
body: externalMetricFamilyAsBytes, |
|||
}, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 8
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsBytes, |
|||
expectedMetricFamilyAsBytes, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 9
|
|||
headers: map[string]string{ |
|||
"Accept": "text/plain", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: []byte{}, |
|||
}, |
|||
}, |
|||
{ // 10
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: expectedMetricFamilyAsText, |
|||
}, |
|||
collector: metricVec, |
|||
}, |
|||
{ // 11
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `text/plain; version=0.0.4`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsText, |
|||
expectedMetricFamilyAsText, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 12
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsBytes, |
|||
expectedMetricFamilyAsBytes, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 13
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsProtoText, |
|||
expectedMetricFamilyAsProtoText, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 14
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsProtoCompactText, |
|||
expectedMetricFamilyAsProtoCompactText, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{externalMetricFamily}, |
|||
}, |
|||
{ // 15
|
|||
headers: map[string]string{ |
|||
"Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", |
|||
}, |
|||
out: output{ |
|||
headers: map[string]string{ |
|||
"Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, |
|||
}, |
|||
body: bytes.Join( |
|||
[][]byte{ |
|||
externalMetricFamilyAsProtoCompactText, |
|||
expectedMetricFamilyMergedWithExternalAsProtoCompactText, |
|||
}, |
|||
[]byte{}, |
|||
), |
|||
}, |
|||
collector: metricVec, |
|||
externalMF: []*dto.MetricFamily{ |
|||
externalMetricFamily, |
|||
externalMetricFamilyWithSameName, |
|||
}, |
|||
}, |
|||
} |
|||
for i, scenario := range scenarios { |
|||
registry := prometheus.NewPedanticRegistry() |
|||
gatherer := prometheus.Gatherer(registry) |
|||
if scenario.externalMF != nil { |
|||
gatherer = prometheus.Gatherers{ |
|||
registry, |
|||
prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { |
|||
return scenario.externalMF, nil |
|||
}), |
|||
} |
|||
} |
|||
|
|||
if scenario.collector != nil { |
|||
registry.Register(scenario.collector) |
|||
} |
|||
writer := httptest.NewRecorder() |
|||
handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) |
|||
request, _ := http.NewRequest("GET", "/", nil) |
|||
for key, value := range scenario.headers { |
|||
request.Header.Add(key, value) |
|||
} |
|||
handler(writer, request) |
|||
|
|||
for key, value := range scenario.out.headers { |
|||
if writer.HeaderMap.Get(key) != value { |
|||
t.Errorf( |
|||
"%d. expected %q for header %q, got %q", |
|||
i, value, key, writer.Header().Get(key), |
|||
) |
|||
} |
|||
} |
|||
|
|||
if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { |
|||
t.Errorf( |
|||
"%d. expected body:\n%s\ngot body:\n%s\n", |
|||
i, scenario.out.body, writer.Body.Bytes(), |
|||
) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// TestHandler runs the scrape-handler scenarios once under the testing
// harness.
func TestHandler(t *testing.T) {
	testHandler(t)
}
|||
|
|||
// BenchmarkHandler measures a full pass over all scrape-handler scenarios per
// iteration.
func BenchmarkHandler(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testHandler(b)
	}
}
|||
|
|||
func TestRegisterWithOrGet(t *testing.T) { |
|||
// Replace the default registerer just to be sure. This is bad, but this
|
|||
// whole test will go away once RegisterOrGet is removed.
|
|||
oldRegisterer := prometheus.DefaultRegisterer |
|||
defer func() { |
|||
prometheus.DefaultRegisterer = oldRegisterer |
|||
}() |
|||
prometheus.DefaultRegisterer = prometheus.NewRegistry() |
|||
original := prometheus.NewCounterVec( |
|||
prometheus.CounterOpts{ |
|||
Name: "test", |
|||
Help: "help", |
|||
}, |
|||
[]string{"foo", "bar"}, |
|||
) |
|||
equalButNotSame := prometheus.NewCounterVec( |
|||
prometheus.CounterOpts{ |
|||
Name: "test", |
|||
Help: "help", |
|||
}, |
|||
[]string{"foo", "bar"}, |
|||
) |
|||
if err := prometheus.Register(original); err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if err := prometheus.Register(equalButNotSame); err == nil { |
|||
t.Fatal("expected error when registringe equal collector") |
|||
} |
|||
existing, err := prometheus.RegisterOrGet(equalButNotSame) |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
if existing != original { |
|||
t.Error("expected original collector but got something else") |
|||
} |
|||
if existing == equalButNotSame { |
|||
t.Error("expected original callector but got new one") |
|||
} |
|||
} |
|||
@ -0,0 +1,534 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"math" |
|||
"sort" |
|||
"sync" |
|||
"time" |
|||
|
|||
"github.com/beorn7/perks/quantile" |
|||
"github.com/golang/protobuf/proto" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// quantileLabel is used for the label that defines the quantile in a
|
|||
// summary.
|
|||
const quantileLabel = "quantile" |
|||
|
|||
// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary.
	Observe(float64)
}
|||
|
|||
// DefObjectives are the default Summary quantile values.
|
|||
var ( |
|||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} |
|||
|
|||
errQuantileLabelNotAllowed = fmt.Errorf( |
|||
"%q is not allowed as label name in summaries", quantileLabel, |
|||
) |
|||
) |
|||
|
|||
// Default values for SummaryOpts.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary observations.
	DefBufCap = 500
)
|||
|
|||
// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary. Mandatory!
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this
	// Summary. Summaries with the same fully-qualified name must have the
	// same label names in their ConstLabels.
	//
	// Note that in most cases, labels have a value that varies during the
	// lifetime of a process. Those labels are usually managed with a
	// SummaryVec. ConstLabels serve only special purposes. One is for the
	// special case where the value of a label does not change during the
	// lifetime of a process, e.g. if the revision of the running binary is
	// put into a label. Another, more advanced purpose is if more than one
	// Collector needs to collect Summaries with the same fully-qualified
	// name. In that case, those Summaries must differ in the values of
	// their ConstLabels. See the Collector examples.
	//
	// If the value of a label never changes (not even between binaries),
	// that label most likely should not be a label at all (but part of the
	// metric name).
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported
	// for q will be the φ-quantile value for some φ between q-e and q+e.
	// The default value is DefObjectives.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Must be positive. The default value is DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size. The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/beorn7/perks/quantile").
	BufCap uint32
}
|||
|
|||
// Problem with the sliding-window decay algorithm: The Merge method of
// perks/quantile is not working as advertised - and it might be unfixable, as
// the underlying algorithm is apparently not capable of merging summaries in
// the first place. To avoid using Merge, we are currently adding observations
// to _each_ age bucket, i.e. the effort to add a sample is essentially
// multiplied by the number of age buckets. When rotating age buckets, we empty
// the previous head stream. On scrape time, we simply take the quantiles from
// the head stream (no merging required). Result: More effort on observation
// time, less effort on scrape time, which is exactly the opposite of what we
// try to accomplish, but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.
|||
|
|||
// NewSummary creates a new Summary based on the provided SummaryOpts.
|
|||
func NewSummary(opts SummaryOpts) Summary { |
|||
return newSummary( |
|||
NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
nil, |
|||
opts.ConstLabels, |
|||
), |
|||
opts, |
|||
) |
|||
} |
|||
|
|||
// newSummary is the internal constructor shared by NewSummary and SummaryVec.
// It validates the label set (the "quantile" label name is reserved), fills in
// defaults for all unset SummaryOpts fields, and sets up the age-bucketed
// quantile streams and the double observation buffers.
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(errInconsistentCardinality)
	}

	// "quantile" may appear neither as a variable nor as a const label.
	for _, n := range desc.variableLabels {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	if len(opts.Objectives) == 0 {
		opts.Objectives = DefObjectives
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: makeLabelPairs(desc, labelValues),

		hotBuf:  make([]float64, 0, opts.BufCap),
		coldBuf: make([]float64, 0, opts.BufCap),
		// Each age bucket covers MaxAge/AgeBuckets of wall-clock time.
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	s.headStreamExpTime = time.Now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	// One quantile stream per age bucket; the head stream is the one that
	// quantiles are served from on scrapes.
	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	// Pre-sort the objective ranks for deterministic quantile output.
	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.init(s) // Init self-collection.
	return s
}
|||
|
|||
// summary implements the Summary interface with a set of age-bucketed
// quantile streams fed from double-buffered observation slices: Observe
// appends to hotBuf; flushes swap hotBuf/coldBuf and drain coldBuf into the
// streams.
type summary struct {
	selfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	// objectives maps quantile rank to allowed absolute error;
	// sortedObjectives holds the ranks in ascending order.
	objectives       map[float64]float64
	sortedObjectives []float64

	labelPairs []*dto.LabelPair

	// Running sum and count over all observations ever flushed.
	sum float64
	cnt uint64

	hotBuf, coldBuf []float64

	// streams holds one quantile stream per age bucket; headStream (at
	// streams[headStreamIdx]) is the one scrapes are served from, and it
	// expires (rotates) at headStreamExpTime.
	streams                          []*quantile.Stream
	streamDuration                   time.Duration
	headStream                       *quantile.Stream
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time
}
|||
|
|||
// Desc implements Metric (and Collector via selfCollector) by returning the
// summary's descriptor.
func (s *summary) Desc() *Desc {
	return s.desc
}
|||
|
|||
// Observe adds a single observation to the summary. It appends to the hot
// buffer under bufMtx and triggers an asynchronous flush when the buffer's
// expiration time has passed or the buffer is full.
func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}
|||
|
|||
// Write implements Metric. It flushes all buffered observations, then
// snapshots sample count, sample sum, and the configured quantiles from the
// head stream into out. Quantiles with no observations in the current window
// are reported as NaN.
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	// Lock order: bufMtx before mtx (see summary struct).
	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			// No observations in the current window.
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
|||
|
|||
// newStream creates a quantile stream targeted at the summary's objectives.
func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}
|||
|
|||
// asyncFlush needs bufMtx locked. It swaps the buffers synchronously and then
// drains the cold buffer in a new goroutine, which releases mtx when done.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction. But hold onto the global non-buffer
	// state mutex until the operation finishes.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}
|||
|
|||
// maybeRotateStreams advances the head stream through the ring of age-bucket
// streams until the head stream's expiration time catches up with the hot
// buffer's. maybeRotateStreams needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		// Reset the outgoing head so it is empty when it becomes head again
		// after a full trip around the ring.
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}
|||
|
|||
// flushColdBuf inserts every buffered observation into all age-bucket
// streams, updates count and sum, empties the cold buffer (keeping its
// capacity), and rotates streams if due. flushColdBuf needs mtx locked.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		// Each observation goes into every stream so that any stream can
		// serve as head once the older ones expire.
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}
|||
|
|||
// swapBufs exchanges hot and cold buffers and advances hotBufExpTime past
// now in streamDuration steps. swapBufs needs mtx AND bufMtx locked, and
// coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}
|||
|
|||
// quantSort implements sort.Interface to order quantiles by ascending rank.
type quantSort []*dto.Quantile

// Len implements sort.Interface.
func (s quantSort) Len() int {
	return len(s)
}

// Swap implements sort.Interface.
func (s quantSort) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less implements sort.Interface, comparing by quantile rank.
func (s quantSort) Less(i, j int) bool {
	return s[i].GetQuantile() < s[j].GetQuantile()
}
|||
|
|||
// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	*MetricVec
}
|||
|
|||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
|||
// partitioned by the given label names. At least one label name must be
|
|||
// provided.
|
|||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { |
|||
desc := NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
labelNames, |
|||
opts.ConstLabels, |
|||
) |
|||
return &SummaryVec{ |
|||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { |
|||
return newSummary(desc, opts, lvs...) |
|||
}), |
|||
} |
|||
} |
|||
|
|||
// GetMetricWithLabelValues replaces the method of the same name in
|
|||
// MetricVec. The difference is that this method returns a Summary and not a
|
|||
// Metric so that no type conversion is required.
|
|||
func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { |
|||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) |
|||
if metric != nil { |
|||
return metric.(Summary), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|||
// difference is that this method returns a Summary and not a Metric so that no
|
|||
// type conversion is required.
|
|||
func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { |
|||
metric, err := m.MetricVec.GetMetricWith(labels) |
|||
if metric != nil { |
|||
return metric.(Summary), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Observe(42.21)
func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
	return m.MetricVec.WithLabelValues(lvs...).(Summary)
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
//     myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (m *SummaryVec) With(labels Labels) Summary {
	return m.MetricVec.With(labels).(Summary)
}
|||
|
|||
// constSummary is a frozen summary: count, sum, and quantiles are fixed at
// construction time. It implements Metric (but not Summary) and is created
// via NewConstSummary.
type constSummary struct {
	desc       *Desc
	count      uint64              // fixed sample count
	sum        float64             // fixed sample sum
	quantiles  map[float64]float64 // rank -> quantile value
	labelPairs []*dto.LabelPair
}

// Desc implements Metric.
func (s *constSummary) Desc() *Desc {
	return s.desc
}
|||
|
|||
// Write implements Metric. It serializes the fixed count, sum, and
// quantiles into out.
func (s *constSummary) Write(out *dto.Metric) error {
	sum := &dto.Summary{
		SampleCount: proto.Uint64(s.count),
		SampleSum:   proto.Float64(s.sum),
	}

	qs := make([]*dto.Quantile, 0, len(s.quantiles))
	for rank, q := range s.quantiles {
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}
	// Map iteration order is random; present quantiles sorted by rank.
	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
|||
|
|||
// NewConstSummary returns a metric representing a Prometheus summary with fixed
// values for the count, sum, and quantiles. As those parameters cannot be
// changed, the returned value does not implement the Summary interface (but
// only the Metric interface). Users of this package will not have much use for
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send it to
// Prometheus in the Collect method.
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
//     map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// labelValues must be given in the same order as the variable labels in desc.
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
func NewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constSummary{
		desc:       desc,
		count:      count,
		sum:        sum,
		quantiles:  quantiles,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}
|||
|
|||
// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) Metric {
	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
|||
@ -0,0 +1,347 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"math" |
|||
"math/rand" |
|||
"sort" |
|||
"sync" |
|||
"testing" |
|||
"testing/quick" |
|||
"time" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// benchmarkSummaryObserve measures Observe throughput with w goroutines each
// performing b.N observations concurrently on one summary.
func benchmarkSummaryObserve(w int, b *testing.B) {
	b.StopTimer()

	// wg is released by the workers when they finish.
	wg := new(sync.WaitGroup)
	wg.Add(w)

	// g gates all workers so they start observing at the same moment.
	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	for i := 0; i < w; i++ {
		go func() {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Observe(float64(i))
			}

			wg.Done()
		}()
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}
|||
|
|||
// BenchmarkSummaryObserve1 benchmarks Observe with 1 concurrent writer.
func BenchmarkSummaryObserve1(b *testing.B) {
	benchmarkSummaryObserve(1, b)
}

// BenchmarkSummaryObserve2 benchmarks Observe with 2 concurrent writers.
func BenchmarkSummaryObserve2(b *testing.B) {
	benchmarkSummaryObserve(2, b)
}

// BenchmarkSummaryObserve4 benchmarks Observe with 4 concurrent writers.
func BenchmarkSummaryObserve4(b *testing.B) {
	benchmarkSummaryObserve(4, b)
}

// BenchmarkSummaryObserve8 benchmarks Observe with 8 concurrent writers.
func BenchmarkSummaryObserve8(b *testing.B) {
	benchmarkSummaryObserve(8, b)
}
|||
|
|||
// benchmarkSummaryWrite measures Write throughput with w goroutines each
// serializing a pre-populated summary b.N times concurrently.
func benchmarkSummaryWrite(w int, b *testing.B) {
	b.StopTimer()

	// wg is released by the workers when they finish.
	wg := new(sync.WaitGroup)
	wg.Add(w)

	// g gates all workers so they start writing at the same moment.
	g := new(sync.WaitGroup)
	g.Add(1)

	s := NewSummary(SummaryOpts{})

	// Pre-populate so each Write serializes a non-trivial state.
	for i := 0; i < 1000000; i++ {
		s.Observe(float64(i))
	}

	for j := 0; j < w; j++ {
		outs := make([]dto.Metric, b.N)

		go func(o []dto.Metric) {
			g.Wait()

			for i := 0; i < b.N; i++ {
				s.Write(&o[i])
			}

			wg.Done()
		}(outs)
	}

	b.StartTimer()
	g.Done()
	wg.Wait()
}
|||
|
|||
// BenchmarkSummaryWrite1 benchmarks Write with 1 concurrent writer.
func BenchmarkSummaryWrite1(b *testing.B) {
	benchmarkSummaryWrite(1, b)
}

// BenchmarkSummaryWrite2 benchmarks Write with 2 concurrent writers.
func BenchmarkSummaryWrite2(b *testing.B) {
	benchmarkSummaryWrite(2, b)
}

// BenchmarkSummaryWrite4 benchmarks Write with 4 concurrent writers.
func BenchmarkSummaryWrite4(b *testing.B) {
	benchmarkSummaryWrite(4, b)
}

// BenchmarkSummaryWrite8 benchmarks Write with 8 concurrent writers.
func BenchmarkSummaryWrite8(b *testing.B) {
	benchmarkSummaryWrite(8, b)
}
|||
|
|||
// TestSummaryConcurrency property-tests a Summary under concurrent Observe
// calls: sample count must be exact, sample sum close, and each reported
// quantile must fall within the tolerance bounds computed by getBounds.
func TestSummaryConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	it := func(n uint32) bool {
		// Derive test dimensions from the quick.Check-supplied seed.
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%5 + 1)
		total := mutations * concLevel

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummary(SummaryOpts{
			Name: "test_summary",
			Help: "helpless",
		})

		// Pre-generate all values so the expected sum and the sorted sample
		// set are known before the goroutines run.
		allVars := make([]float64, total)
		var sampleSum float64
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				allVars[i*mutations+j] = v
				sampleSum += v
			}

			go func(vals []float64) {
				start.Wait()
				for _, v := range vals {
					sum.Observe(v)
				}
				end.Done()
			}(vals)
		}
		sort.Float64s(allVars)
		start.Done()
		end.Wait()

		m := &dto.Metric{}
		sum.Write(m)
		if got, want := int(*m.Summary.SampleCount), total; got != want {
			t.Errorf("got sample count %d, want %d", got, want)
		}
		if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
			t.Errorf("got sample sum %f, want %f", got, want)
		}

		objectives := make([]float64, 0, len(DefObjectives))
		for qu := range DefObjectives {
			objectives = append(objectives, qu)
		}
		sort.Float64s(objectives)

		for i, wantQ := range objectives {
			ε := DefObjectives[wantQ]
			gotQ := *m.Summary.Quantile[i].Quantile
			gotV := *m.Summary.Quantile[i].Value
			min, max := getBounds(allVars, wantQ, ε)
			if gotQ != wantQ {
				t.Errorf("got quantile %f, want %f", gotQ, wantQ)
			}
			if gotV < min || gotV > max {
				t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}
|||
|
|||
// TestSummaryVecConcurrency property-tests a SummaryVec under concurrent
// Observe calls spread randomly over its children: per-child sample count
// must be exact, sample sum close, and each reported quantile must fall
// within the tolerance bounds computed by getBounds.
func TestSummaryVecConcurrency(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
	}

	rand.Seed(42)

	objectives := make([]float64, 0, len(DefObjectives))
	for qu := range DefObjectives {

		objectives = append(objectives, qu)
	}
	sort.Float64s(objectives)

	it := func(n uint32) bool {
		// Derive test dimensions from the quick.Check-supplied seed.
		mutations := int(n%1e4 + 1e4)
		concLevel := int(n%7 + 1)
		vecLength := int(n%3 + 1)

		var start, end sync.WaitGroup
		start.Add(1)
		end.Add(concLevel)

		sum := NewSummaryVec(
			SummaryOpts{
				Name: "test_summary",
				Help: "helpless",
			},
			[]string{"label"},
		)

		// Per-child expected sample sets and sums, filled while generating.
		allVars := make([][]float64, vecLength)
		sampleSums := make([]float64, vecLength)
		for i := 0; i < concLevel; i++ {
			vals := make([]float64, mutations)
			picks := make([]int, mutations)
			for j := 0; j < mutations; j++ {
				v := rand.NormFloat64()
				vals[j] = v
				pick := rand.Intn(vecLength)
				picks[j] = pick
				allVars[pick] = append(allVars[pick], v)
				sampleSums[pick] += v
			}

			go func(vals []float64) {
				start.Wait()
				for i, v := range vals {
					// string(rune(...)) instead of string(int): the int form
					// is flagged by go vet (stringintconv); values stay ASCII.
					sum.WithLabelValues(string(rune('A' + picks[i]))).Observe(v)
				}
				end.Done()
			}(vals)
		}
		for _, vars := range allVars {
			sort.Float64s(vars)
		}
		start.Done()
		end.Wait()

		for i := 0; i < vecLength; i++ {
			m := &dto.Metric{}
			s := sum.WithLabelValues(string(rune('A' + i)))
			s.Write(m)
			if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
				t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
			}
			if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
				t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
			}
			for j, wantQ := range objectives {
				ε := DefObjectives[wantQ]
				gotQ := *m.Summary.Quantile[j].Quantile
				gotV := *m.Summary.Quantile[j].Value
				min, max := getBounds(allVars[i], wantQ, ε)
				if gotQ != wantQ {
					t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
				}
				if gotV < min || gotV > max {
					t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
				}
			}
		}
		return true
	}

	if err := quick.Check(it, nil); err != nil {
		t.Error(err)
	}
}
|||
|
|||
// TestSummaryDecay checks that observations age out of a summary with MaxAge
// set: while observing a rising sequence, the 0.1-quantile must track a
// moving window, and after MaxAge with no observations the quantile is NaN.
func TestSummaryDecay(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping test in short mode.")
		// More because it depends on timing than because it is particularly long...
	}

	sum := NewSummary(SummaryOpts{
		Name:       "test_summary",
		Help:       "helpless",
		MaxAge:     100 * time.Millisecond,
		Objectives: map[float64]float64{0.1: 0.001},
		AgeBuckets: 10,
	})

	m := &dto.Metric{}
	i := 0
	tick := time.NewTicker(time.Millisecond)
	for range tick.C {
		i++
		sum.Observe(float64(i))
		if i%10 == 0 {
			sum.Write(m)
			// Expected 0.1-quantile of a ~100ms sliding window over 1,2,...,i;
			// a generous ±20 tolerance absorbs timing jitter.
			if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
				t.Errorf("%d. got %f, want %f", i, got, want)
			}
			m.Reset()
		}
		if i >= 1000 {
			break
		}
	}
	tick.Stop()
	// Wait for MaxAge without observations and make sure quantiles are NaN.
	time.Sleep(100 * time.Millisecond)
	sum.Write(m)
	if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
		t.Errorf("got %f, want NaN after expiration", got)
	}
}
|||
|
|||
// getBounds returns the interval [min, max] of sorted sample values in vars
// that a reported value for quantile rank q with allowed error ε may occupy.
//
// TODO(beorn7): This currently tolerates an error of up to 2*ε. The
// error must be at most ε, but for some reason, it's sometimes slightly
// higher. That's a bug.
func getBounds(vars []float64, q, ε float64) (min, max float64) {
	n := float64(len(vars))
	lower := int((q - 2*ε) * n)
	upper := int(math.Ceil((q + 2*ε) * n))

	if lower > 1 {
		min = vars[lower-1]
	} else {
		min = vars[0]
	}
	if upper < len(vars) {
		max = vars[upper-1]
	} else {
		max = vars[len(vars)-1]
	}
	return min, max
}
|||
@ -0,0 +1,138 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// An Untyped metric works the same as a Gauge. The only difference is that
// no type information is implied.
//
// To create Untyped instances, use NewUntyped.
type Untyped interface {
	Metric
	Collector

	// Set sets the Untyped metric to an arbitrary value.
	Set(float64)
	// Inc increments the Untyped metric by 1.
	Inc()
	// Dec decrements the Untyped metric by 1.
	Dec()
	// Add adds the given value to the Untyped metric. (The value can be
	// negative, resulting in a decrease.)
	Add(float64)
	// Sub subtracts the given value from the Untyped metric. (The value can
	// be negative, resulting in an increase.)
	Sub(float64)
}
|||
|
|||
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts

// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
func NewUntyped(opts UntypedOpts) Untyped {
	// An Untyped metric is backed by the generic value type with no
	// variable labels and a starting value of 0.
	return newValue(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), UntypedValue, 0)
}
|||
|
|||
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
// share the same Desc, but have different values for their variable
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
	*MetricVec
}
|||
|
|||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
|||
// partitioned by the given label names. At least one label name must be
|
|||
// provided.
|
|||
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { |
|||
desc := NewDesc( |
|||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), |
|||
opts.Help, |
|||
labelNames, |
|||
opts.ConstLabels, |
|||
) |
|||
return &UntypedVec{ |
|||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { |
|||
return newValue(desc, UntypedValue, 0, lvs...) |
|||
}), |
|||
} |
|||
} |
|||
|
|||
// GetMetricWithLabelValues replaces the method of the same name in
|
|||
// MetricVec. The difference is that this method returns an Untyped and not a
|
|||
// Metric so that no type conversion is required.
|
|||
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { |
|||
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) |
|||
if metric != nil { |
|||
return metric.(Untyped), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// GetMetricWith replaces the method of the same name in MetricVec. The
|
|||
// difference is that this method returns an Untyped and not a Metric so that no
|
|||
// type conversion is required.
|
|||
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { |
|||
metric, err := m.MetricVec.GetMetricWith(labels) |
|||
if metric != nil { |
|||
return metric.(Untyped), err |
|||
} |
|||
return nil, err |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Add(42)
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
	return m.MetricVec.WithLabelValues(lvs...).(Untyped)
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *UntypedVec) With(labels Labels) Untyped {
	return m.MetricVec.With(labels).(Untyped)
}
|||
|
|||
// UntypedFunc is an Untyped whose value is determined at collect time by
// calling a provided function.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
	Metric
	Collector
}
|||
|
|||
// NewUntypedFunc creates a new UntypedFunc based on the provided
// UntypedOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where an UntypedFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
	return newValueFunc(NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	), UntypedValue, function)
}
|||
@ -0,0 +1,234 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"errors" |
|||
"fmt" |
|||
"math" |
|||
"sort" |
|||
"sync/atomic" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
) |
|||
|
|||
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
const (
	_ ValueType = iota
	// CounterValue denotes a counter metric (serialized as dto.Counter).
	CounterValue
	// GaugeValue denotes a gauge metric (serialized as dto.Gauge).
	GaugeValue
	// UntypedValue denotes a metric with no implied type (dto.Untyped).
	UntypedValue
)

// errInconsistentCardinality signals that the number of supplied label
// values does not match the number of variable labels in the Desc.
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
|||
|
|||
// value is a generic metric for simple values. It implements Metric, Collector,
// Counter, Gauge, and Untyped. Its effective type is determined by
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
	// valBits contains the bits of the represented float64 value. It has
	// to go first in the struct to guarantee alignment for atomic
	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64

	selfCollector

	desc       *Desc
	valType    ValueType
	labelPairs []*dto.LabelPair
}
|||
|
|||
// newValue returns a newly allocated value with the given Desc, ValueType,
// sample value and label values. It panics if the number of label
// values is different from the number of variable labels in Desc.
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
	if len(labelValues) != len(desc.variableLabels) {
		panic(errInconsistentCardinality)
	}
	result := &value{
		desc:       desc,
		valType:    valueType,
		valBits:    math.Float64bits(val),
		labelPairs: makeLabelPairs(desc, labelValues),
	}
	// Register the value as its own collector (selfCollector pattern).
	result.init(result)
	return result
}
|||
|
|||
// Desc implements Metric.
func (v *value) Desc() *Desc {
	return v.desc
}

// Set stores val atomically as the current value.
func (v *value) Set(val float64) {
	atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}

// Inc increments the value by 1.
func (v *value) Inc() {
	v.Add(1)
}

// Dec decrements the value by 1.
func (v *value) Dec() {
	v.Add(-1)
}

// Add atomically adds val to the value using a compare-and-swap loop on the
// float64 bit pattern (there is no atomic float64 add).
func (v *value) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&v.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
			return
		}
	}
}

// Sub subtracts val from the value (negated Add).
func (v *value) Sub(val float64) {
	v.Add(val * -1)
}

// Write implements Metric: it serializes an atomic snapshot of the value.
func (v *value) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
	return populateMetric(v.valType, val, v.labelPairs, out)
}
|||
|
|||
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
	selfCollector

	desc       *Desc
	valType    ValueType
	function   func() float64 // called on every Write to obtain the value
	labelPairs []*dto.LabelPair
}
|||
|
|||
// newValueFunc returns a newly allocated valueFunc with the given Desc and
// ValueType. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a valueFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
	result := &valueFunc{
		desc:       desc,
		valType:    valueType,
		function:   function,
		labelPairs: makeLabelPairs(desc, nil),
	}
	// Register the valueFunc as its own collector (selfCollector pattern).
	result.init(result)
	return result
}
|||
|
|||
// Desc implements Metric.
func (v *valueFunc) Desc() *Desc {
	return v.desc
}

// Write implements Metric: the reported value is obtained by calling the
// stored function at serialization time.
func (v *valueFunc) Write(out *dto.Metric) error {
	return populateMetric(v.valType, v.function(), v.labelPairs, out)
}
|||
|
|||
// NewConstMetric returns a metric with one fixed value that cannot be
// changed. Users of this package will not have much use for it in regular
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
	if len(desc.variableLabels) != len(labelValues) {
		return nil, errInconsistentCardinality
	}
	return &constMetric{
		desc:       desc,
		valType:    valueType,
		val:        value,
		labelPairs: makeLabelPairs(desc, labelValues),
	}, nil
}
|||
|
|||
// MustNewConstMetric is a version of NewConstMetric that panics where
// NewConstMetric would have returned an error.
func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
	m, err := NewConstMetric(desc, valueType, value, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}
|||
|
|||
// constMetric is an immutable metric: value and labels are fixed at
// construction time. Created via NewConstMetric/MustNewConstMetric.
type constMetric struct {
	desc       *Desc
	valType    ValueType
	val        float64
	labelPairs []*dto.LabelPair
}

// Desc implements Metric.
func (m *constMetric) Desc() *Desc {
	return m.desc
}

// Write implements Metric, serializing the fixed value.
func (m *constMetric) Write(out *dto.Metric) error {
	return populateMetric(m.valType, m.val, m.labelPairs, out)
}
|||
|
|||
// populateMetric fills m with the given label pairs and the value v, placed
// in the protobuf field that corresponds to the ValueType t. It returns an
// error for an unknown ValueType.
func populateMetric(
	t ValueType,
	v float64,
	labelPairs []*dto.LabelPair,
	m *dto.Metric,
) error {
	m.Label = labelPairs
	switch t {
	case CounterValue:
		m.Counter = &dto.Counter{Value: proto.Float64(v)}
	case GaugeValue:
		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
	case UntypedValue:
		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
	default:
		return fmt.Errorf("encountered unknown type %v", t)
	}
	return nil
}
|||
|
|||
// makeLabelPairs assembles the sorted label pairs for a metric from the
// descriptor's constant label pairs and the given variable label values.
// labelValues must have exactly one entry per variable label in desc
// (validated by the callers). Returns nil when there are no labels at all.
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
	if totalLen == 0 {
		// Super fast path.
		return nil
	}
	if len(desc.variableLabels) == 0 {
		// Moderately fast path.
		return desc.constLabelPairs
	}
	labelPairs := make([]*dto.LabelPair, 0, totalLen)
	for i, n := range desc.variableLabels {
		labelPairs = append(labelPairs, &dto.LabelPair{
			Name:  proto.String(n),
			Value: proto.String(labelValues[i]),
		})
	}
	// Idiomatic append-spread instead of an element-by-element copy loop.
	labelPairs = append(labelPairs, desc.constLabelPairs...)
	sort.Sort(LabelPairSorter(labelPairs))
	return labelPairs
}
|||
@ -0,0 +1,404 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"sync" |
|||
|
|||
"github.com/prometheus/common/model" |
|||
) |
|||
|
|||
// MetricVec is a Collector to bundle metrics of the same name that
// differ in their label values. MetricVec is usually not used directly but as a
// building block for implementations of vectors of a given metric
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
// provided in this package.
type MetricVec struct {
	mtx      sync.RWMutex // Protects the children.
	children map[uint64][]metricWithLabelValues
	desc     *Desc

	// newMetric creates a child metric for a label-value combination.
	newMetric   func(labelValues ...string) Metric
	hashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling
	hashAddByte func(h uint64, b byte) uint64
}
|||
|
|||
// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
	return &MetricVec{
		children:  map[uint64][]metricWithLabelValues{},
		desc:      desc,
		newMetric: newMetric,
		// Default FNV-style hash helpers; tests may substitute these to
		// exercise collision handling.
		hashAdd:     hashAdd,
		hashAddByte: hashAddByte,
	}
}
|||
|
|||
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
	// values holds the label values in the order of Desc.variableLabels,
	// so colliding bucket entries can be told apart by exact comparison.
	values []string
	metric Metric
}
|||
|
|||
// Describe implements Collector. It sends exactly one Desc — the one shared
// by all metrics in this vector — on the provided channel.
func (m *MetricVec) Describe(ch chan<- *Desc) {
	ch <- m.desc
}
|||
|
|||
// Collect implements Collector.
|
|||
func (m *MetricVec) Collect(ch chan<- Metric) { |
|||
m.mtx.RLock() |
|||
defer m.mtx.RUnlock() |
|||
|
|||
for _, metrics := range m.children { |
|||
for _, metric := range metrics { |
|||
ch <- metric.metric |
|||
} |
|||
} |
|||
} |
|||
|
|||
// GetMetricWithLabelValues returns the Metric for the given slice of label
|
|||
// values (same order as the VariableLabels in Desc). If that combination of
|
|||
// label values is accessed for the first time, a new Metric is created.
|
|||
//
|
|||
// It is possible to call this method without using the returned Metric to only
|
|||
// create the new Metric but leave it at its start value (e.g. a Summary or
|
|||
// Histogram without any observations). See also the SummaryVec example.
|
|||
//
|
|||
// Keeping the Metric for later use is possible (and should be considered if
|
|||
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
|
|||
// Delete can be used to delete the Metric from the MetricVec. In that case, the
|
|||
// Metric will still exist, but it will not be exported anymore, even if a
|
|||
// Metric with the same label values is created later. See also the CounterVec
|
|||
// example.
|
|||
//
|
|||
// An error is returned if the number of label values is not the same as the
|
|||
// number of VariableLabels in Desc.
|
|||
//
|
|||
// Note that for more than one label value, this method is prone to mistakes
|
|||
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
|
|||
// an alternative to avoid that type of mistake. For higher label numbers, the
|
|||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
|||
// with a performance overhead (for creating and processing the Labels map).
|
|||
// See also the GaugeVec example.
|
|||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { |
|||
h, err := m.hashLabelValues(lvs) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return m.getOrCreateMetricWithLabelValues(h, lvs), nil |
|||
} |
|||
|
|||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
|||
// must match those of the VariableLabels in Desc). If that label map is
|
|||
// accessed for the first time, a new Metric is created. Implications of
|
|||
// creating a Metric without using it and keeping the Metric for later use are
|
|||
// the same as for GetMetricWithLabelValues.
|
|||
//
|
|||
// An error is returned if the number and names of the Labels are inconsistent
|
|||
// with those of the VariableLabels in Desc.
|
|||
//
|
|||
// This method is used for the same purpose as
|
|||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
|||
// methods.
|
|||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { |
|||
h, err := m.hashLabels(labels) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
|
|||
return m.getOrCreateMetricWithLabels(h, labels), nil |
|||
} |
|||
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
|||
// occurs. The method allows neat syntax like:
|
|||
// httpReqs.WithLabelValues("404", "POST").Inc()
|
|||
func (m *MetricVec) WithLabelValues(lvs ...string) Metric { |
|||
metric, err := m.GetMetricWithLabelValues(lvs...) |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
return metric |
|||
} |
|||
|
|||
// With works as GetMetricWith, but panics if an error occurs. The method allows
|
|||
// neat syntax like:
|
|||
// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
|
|||
func (m *MetricVec) With(labels Labels) Metric { |
|||
metric, err := m.GetMetricWith(labels) |
|||
if err != nil { |
|||
panic(err) |
|||
} |
|||
return metric |
|||
} |
|||
|
|||
// DeleteLabelValues removes the metric where the variable labels are the same
|
|||
// as those passed in as labels (same order as the VariableLabels in Desc). It
|
|||
// returns true if a metric was deleted.
|
|||
//
|
|||
// It is not an error if the number of label values is not the same as the
|
|||
// number of VariableLabels in Desc. However, such inconsistent label count can
|
|||
// never match an actual Metric, so the method will always return false in that
|
|||
// case.
|
|||
//
|
|||
// Note that for more than one label value, this method is prone to mistakes
|
|||
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
|
|||
// alternative to avoid that type of mistake. For higher label numbers, the
|
|||
// latter has a much more readable (albeit more verbose) syntax, but it comes
|
|||
// with a performance overhead (for creating and processing the Labels map).
|
|||
// See also the CounterVec example.
|
|||
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { |
|||
m.mtx.Lock() |
|||
defer m.mtx.Unlock() |
|||
|
|||
h, err := m.hashLabelValues(lvs) |
|||
if err != nil { |
|||
return false |
|||
} |
|||
return m.deleteByHashWithLabelValues(h, lvs) |
|||
} |
|||
|
|||
// Delete deletes the metric where the variable labels are the same as those
|
|||
// passed in as labels. It returns true if a metric was deleted.
|
|||
//
|
|||
// It is not an error if the number and names of the Labels are inconsistent
|
|||
// with those of the VariableLabels in the Desc of the MetricVec. However, such
|
|||
// inconsistent Labels can never match an actual Metric, so the method will
|
|||
// always return false in that case.
|
|||
//
|
|||
// This method is used for the same purpose as DeleteLabelValues(...string). See
|
|||
// there for pros and cons of the two methods.
|
|||
func (m *MetricVec) Delete(labels Labels) bool { |
|||
m.mtx.Lock() |
|||
defer m.mtx.Unlock() |
|||
|
|||
h, err := m.hashLabels(labels) |
|||
if err != nil { |
|||
return false |
|||
} |
|||
|
|||
return m.deleteByHashWithLabels(h, labels) |
|||
} |
|||
|
|||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
|||
// there are multiple matches in the bucket, use lvs to select a metric and
|
|||
// remove only that metric.
|
|||
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { |
|||
metrics, ok := m.children[h] |
|||
if !ok { |
|||
return false |
|||
} |
|||
|
|||
i := m.findMetricWithLabelValues(metrics, lvs) |
|||
if i >= len(metrics) { |
|||
return false |
|||
} |
|||
|
|||
if len(metrics) > 1 { |
|||
m.children[h] = append(metrics[:i], metrics[i+1:]...) |
|||
} else { |
|||
delete(m.children, h) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
|||
// are multiple matches in the bucket, use lvs to select a metric and remove
|
|||
// only that metric.
|
|||
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { |
|||
metrics, ok := m.children[h] |
|||
if !ok { |
|||
return false |
|||
} |
|||
i := m.findMetricWithLabels(metrics, labels) |
|||
if i >= len(metrics) { |
|||
return false |
|||
} |
|||
|
|||
if len(metrics) > 1 { |
|||
m.children[h] = append(metrics[:i], metrics[i+1:]...) |
|||
} else { |
|||
delete(m.children, h) |
|||
} |
|||
return true |
|||
} |
|||
|
|||
// Reset deletes all metrics in this vector.
func (m *MetricVec) Reset() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// Delete keys in place rather than allocating a fresh map, so the map
	// value observed by other methods stays the same object.
	for h := range m.children {
		delete(m.children, h)
	}
}
|||
|
|||
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { |
|||
if len(vals) != len(m.desc.variableLabels) { |
|||
return 0, errInconsistentCardinality |
|||
} |
|||
h := hashNew() |
|||
for _, val := range vals { |
|||
h = m.hashAdd(h, val) |
|||
h = m.hashAddByte(h, model.SeparatorByte) |
|||
} |
|||
return h, nil |
|||
} |
|||
|
|||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { |
|||
if len(labels) != len(m.desc.variableLabels) { |
|||
return 0, errInconsistentCardinality |
|||
} |
|||
h := hashNew() |
|||
for _, label := range m.desc.variableLabels { |
|||
val, ok := labels[label] |
|||
if !ok { |
|||
return 0, fmt.Errorf("label name %q missing in label map", label) |
|||
} |
|||
h = m.hashAdd(h, val) |
|||
h = m.hashAddByte(h, model.SeparatorByte) |
|||
} |
|||
return h, nil |
|||
} |
|||
|
|||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function acquires the mutex itself; callers must not hold it.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
	// Fast path: look up under the read lock only.
	m.mtx.RLock()
	metric, ok := m.getMetricWithLabelValues(hash, lvs)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Re-check under the write lock: another goroutine may have created the
	// metric between RUnlock and Lock above.
	metric, ok = m.getMetricWithLabelValues(hash, lvs)
	if !ok {
		// Copy to avoid allocation in case we don't go down this code path.
		// The copy also protects the stored values from later mutation of
		// the caller's slice.
		copiedLVs := make([]string, len(lvs))
		copy(copiedLVs, lvs)
		metric = m.newMetric(copiedLVs...)
		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
	}
	return metric
}
|||
|
|||
// getOrCreateMetricWithLabels retrieves the metric by hash and label map
// or creates it and returns the new one.
//
// This function acquires the mutex itself; callers must not hold it.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
	// Fast path: look up under the read lock only.
	m.mtx.RLock()
	metric, ok := m.getMetricWithLabels(hash, labels)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Re-check under the write lock: another goroutine may have created the
	// metric between RUnlock and Lock above.
	metric, ok = m.getMetricWithLabels(hash, labels)
	if !ok {
		// extractLabelValues already builds a fresh slice, so no extra
		// defensive copy is needed here.
		lvs := m.extractLabelValues(labels)
		metric = m.newMetric(lvs...)
		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
	}
	return metric
}
|||
|
|||
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
|||
// the hash space. Must be called while holding read mutex.
|
|||
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { |
|||
metrics, ok := m.children[h] |
|||
if ok { |
|||
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { |
|||
return metrics[i].metric, true |
|||
} |
|||
} |
|||
return nil, false |
|||
} |
|||
|
|||
// getMetricWithLabels gets a metric while handling possible collisions in
|
|||
// the hash space. Must be called while holding read mutex.
|
|||
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { |
|||
metrics, ok := m.children[h] |
|||
if ok { |
|||
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { |
|||
return metrics[i].metric, true |
|||
} |
|||
} |
|||
return nil, false |
|||
} |
|||
|
|||
// findMetricWithLabelValues returns the index of the matching metric or
|
|||
// len(metrics) if not found.
|
|||
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { |
|||
for i, metric := range metrics { |
|||
if m.matchLabelValues(metric.values, lvs) { |
|||
return i |
|||
} |
|||
} |
|||
return len(metrics) |
|||
} |
|||
|
|||
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
|||
// if not found.
|
|||
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { |
|||
for i, metric := range metrics { |
|||
if m.matchLabels(metric.values, labels) { |
|||
return i |
|||
} |
|||
} |
|||
return len(metrics) |
|||
} |
|||
|
|||
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { |
|||
if len(values) != len(lvs) { |
|||
return false |
|||
} |
|||
for i, v := range values { |
|||
if v != lvs[i] { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func (m *MetricVec) matchLabels(values []string, labels Labels) bool { |
|||
if len(labels) != len(values) { |
|||
return false |
|||
} |
|||
for i, k := range m.desc.variableLabels { |
|||
if values[i] != labels[k] { |
|||
return false |
|||
} |
|||
} |
|||
return true |
|||
} |
|||
|
|||
func (m *MetricVec) extractLabelValues(labels Labels) []string { |
|||
labelValues := make([]string, len(labels)) |
|||
for i, k := range m.desc.variableLabels { |
|||
labelValues[i] = labels[k] |
|||
} |
|||
return labelValues |
|||
} |
|||
@ -0,0 +1,312 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package prometheus |
|||
|
|||
import ( |
|||
"fmt" |
|||
"testing" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
func TestDelete(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
testDelete(t, vec) |
|||
} |
|||
|
|||
func TestDeleteWithCollisions(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
vec.hashAdd = func(h uint64, s string) uint64 { return 1 } |
|||
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } |
|||
testDelete(t, vec) |
|||
} |
|||
|
|||
// testDelete exercises MetricVec.Delete: deleting a missing series, deleting
// an existing series exactly once, label-map order independence, and
// non-matching label names/values/cardinality.
func testDelete(t *testing.T, vec *UntypedVec) {
	// Deleting from an empty vector must report false.
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	// A created series is deleted exactly once; a second Delete is a no-op.
	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	// Map literal ordering must not matter for Delete.
	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	// Swapped values or a missing label must never match an existing series.
	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
|||
|
|||
func TestDeleteLabelValues(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
testDeleteLabelValues(t, vec) |
|||
} |
|||
|
|||
func TestDeleteLabelValuesWithCollisions(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
vec.hashAdd = func(h uint64, s string) uint64 { return 1 } |
|||
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } |
|||
testDeleteLabelValues(t, vec) |
|||
} |
|||
|
|||
// testDeleteLabelValues exercises MetricVec.DeleteLabelValues, including the
// collision case: a second series sharing the first label value is created so
// that (under the degenerate hash) deletion must pick the right bucket entry.
func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
	// Deleting from an empty vector must report false.
	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
	if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	// Second delete of the same series is a no-op.
	if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	// The colliding sibling must still be present and deletable.
	if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
		t.Errorf("got %v, want %v", got, want)
	}

	vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
	// Delete out of order.
	if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	// Wrong cardinality can never match.
	if got, want := vec.DeleteLabelValues("v1"), false; got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}
|||
|
|||
func TestMetricVec(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
testMetricVec(t, vec) |
|||
} |
|||
|
|||
func TestMetricVecWithCollisions(t *testing.T) { |
|||
vec := NewUntypedVec( |
|||
UntypedOpts{ |
|||
Name: "test", |
|||
Help: "helpless", |
|||
}, |
|||
[]string{"l1", "l2"}, |
|||
) |
|||
vec.hashAdd = func(h uint64, s string) uint64 { return 1 } |
|||
vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } |
|||
testMetricVec(t, vec) |
|||
} |
|||
|
|||
// testMetricVec populates a vector with a known distribution of increments,
// then walks the internal children map to verify that every stored metric
// carries the expected label pair and counter value, and that Reset empties
// the vector.
func testMetricVec(t *testing.T, vec *UntypedVec) {
	vec.Reset() // Actually test Reset now!

	var pair [2]string
	// Keep track of metrics: expected increment count per label pair.
	expected := map[[2]string]int{}

	for i := 0; i < 1000; i++ {
		pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Cycle through 4x5 label combinations.
		expected[pair]++
		vec.WithLabelValues(pair[0], pair[1]).Inc()

		// Also hammer one fixed pair on every iteration.
		expected[[2]string{"v1", "v2"}]++
		vec.WithLabelValues("v1", "v2").(Untyped).Inc()
	}

	var total int
	for _, metrics := range vec.children {
		for _, metric := range metrics {
			total++
			copy(pair[:], metric.values)

			var metricOut dto.Metric
			if err := metric.metric.Write(&metricOut); err != nil {
				t.Fatal(err)
			}
			actual := *metricOut.Untyped.Value

			var actualPair [2]string
			for i, label := range metricOut.Label {
				actualPair[i] = *label.Value
			}

			// Test output pair against metric.values to ensure we've selected
			// the right one. We check this to ensure the below check means
			// anything at all.
			if actualPair != pair {
				t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
			}

			if actual != float64(expected[pair]) {
				t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
			}
		}
	}

	// Every expected pair must appear exactly once across all buckets.
	if total != len(expected) {
		t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
	}

	vec.Reset()

	if len(vec.children) > 0 {
		t.Fatalf("reset failed")
	}
}
|||
|
|||
// TestCounterVecEndToEndWithCollision verifies that two distinct label values
// (chosen to collide in the hash — see the test name) are kept apart: each
// series retains its own label value and counter total.
func TestCounterVecEndToEndWithCollision(t *testing.T) {
	vec := NewCounterVec(
		CounterOpts{
			Name: "test",
			Help: "helpless",
		},
		[]string{"labelname"},
	)
	vec.WithLabelValues("77kepQFQ8Kl").Inc()
	vec.WithLabelValues("!0IC=VloaY").Add(2)

	// First series: value 1, correct label.
	m := &dto.Metric{}
	if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
		t.Fatal(err)
	}
	if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
		t.Errorf("got label value %q, want %q", got, want)
	}
	if got, want := m.GetCounter().GetValue(), 1.; got != want {
		t.Errorf("got value %f, want %f", got, want)
	}
	// Second series: value 2, correct label.
	m.Reset()
	if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
		t.Fatal(err)
	}
	if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
		t.Errorf("got label value %q, want %q", got, want)
	}
	if got, want := m.GetCounter().GetValue(), 2.; got != want {
		t.Errorf("got value %f, want %f", got, want)
	}
}
|||
|
|||
func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { |
|||
benchmarkMetricVecWithLabelValues(b, map[string][]string{ |
|||
"l1": {"onevalue"}, |
|||
"l2": {"twovalue"}, |
|||
}) |
|||
} |
|||
|
|||
// The cardinality benchmarks below sweep the number of label keys and the
// number of distinct values per key to show how lookup cost scales with the
// size of the label space.
func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
}

func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
}

func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
}

func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
}

func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
	benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
}
|||
|
|||
func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { |
|||
labels := map[string][]string{} |
|||
|
|||
for i := 0; i < nkeys; i++ { |
|||
var ( |
|||
k = fmt.Sprintf("key-%v", i) |
|||
vs = make([]string, 0, nvalues) |
|||
) |
|||
for j := 0; j < nvalues; j++ { |
|||
vs = append(vs, fmt.Sprintf("value-%v", j)) |
|||
} |
|||
labels[k] = vs |
|||
} |
|||
|
|||
benchmarkMetricVecWithLabelValues(b, labels) |
|||
} |
|||
|
|||
// benchmarkMetricVecWithLabelValues measures WithLabelValues lookups on a
// vector keyed by the given candidate label values, cycling through value
// permutations so both hits and new-series creation are exercised.
func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
	var keys []string
	for k := range labels { // Map iteration order is random, which is fine here.
		keys = append(keys, k)
	}

	values := make([]string, len(labels)) // Value cache for permutations.
	vec := NewUntypedVec(
		UntypedOpts{
			Name: "test",
			Help: "helpless",
		},
		keys,
	)

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Varies input across the provided map entries based on iteration.
		for j, k := range keys {
			candidates := labels[k]
			values[j] = candidates[i%len(candidates)]
		}

		vec.WithLabelValues(values...)
	}
}
|||
@ -0,0 +1,364 @@ |
|||
// Code generated by protoc-gen-go.
|
|||
// source: metrics.proto
|
|||
// DO NOT EDIT!
|
|||
|
|||
/* |
|||
Package io_prometheus_client is a generated protocol buffer package. |
|||
|
|||
It is generated from these files: |
|||
metrics.proto |
|||
|
|||
It has these top-level messages: |
|||
LabelPair |
|||
Gauge |
|||
Counter |
|||
Quantile |
|||
Summary |
|||
Untyped |
|||
Histogram |
|||
Bucket |
|||
Metric |
|||
MetricFamily |
|||
*/ |
|||
package io_prometheus_client |
|||
|
|||
import proto "github.com/golang/protobuf/proto" |
|||
import math "math" |
|||
|
|||
// Reference imports to suppress errors if they are not otherwise used.
|
|||
var _ = proto.Marshal |
|||
var _ = math.Inf |
|||
|
|||
// MetricType mirrors the MetricType enum in metrics.proto.
// NOTE: generated by protoc-gen-go; keep changes to comments only.
type MetricType int32

const (
	MetricType_COUNTER   MetricType = 0
	MetricType_GAUGE     MetricType = 1
	MetricType_SUMMARY   MetricType = 2
	MetricType_UNTYPED   MetricType = 3
	MetricType_HISTOGRAM MetricType = 4
)

// MetricType_name maps enum values to their proto names.
var MetricType_name = map[int32]string{
	0: "COUNTER",
	1: "GAUGE",
	2: "SUMMARY",
	3: "UNTYPED",
	4: "HISTOGRAM",
}

// MetricType_value maps proto names back to enum values.
var MetricType_value = map[string]int32{
	"COUNTER":   0,
	"GAUGE":     1,
	"SUMMARY":   2,
	"UNTYPED":   3,
	"HISTOGRAM": 4,
}

// Enum returns a pointer to a copy of x, as proto2 optional fields require.
func (x MetricType) Enum() *MetricType {
	p := new(MetricType)
	*p = x
	return p
}

// String returns the proto name of the enum value.
func (x MetricType) String() string {
	return proto.EnumName(MetricType_name, int32(x))
}

// UnmarshalJSON implements json.Unmarshaler via the generated value map.
func (x *MetricType) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
	if err != nil {
		return err
	}
	*x = MetricType(value)
	return nil
}
|||
|
|||
// LabelPair mirrors the LabelPair message in metrics.proto: one name/value
// label attached to a Metric. Generated code; edit comments only.
type LabelPair struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Value            *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *LabelPair) Reset()         { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage()    {}

// GetName returns the label name, or "" if unset.
func (m *LabelPair) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

// GetValue returns the label value, or "" if unset.
func (m *LabelPair) GetValue() string {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return ""
}
|||
|
|||
// Gauge mirrors the Gauge message in metrics.proto. Generated code; edit
// comments only.
type Gauge struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Gauge) Reset()         { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage()    {}

// GetValue returns the gauge value, or 0 if unset.
func (m *Gauge) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
|||
|
|||
// Counter mirrors the Counter message in metrics.proto. Generated code; edit
// comments only.
type Counter struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Counter) Reset()         { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage()    {}

// GetValue returns the counter value, or 0 if unset.
func (m *Counter) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
|||
|
|||
// Quantile mirrors the Quantile message in metrics.proto: one quantile rank
// and its value within a Summary. Generated code; edit comments only.
type Quantile struct {
	Quantile         *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
	Value            *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Quantile) Reset()         { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage()    {}

// GetQuantile returns the quantile rank, or 0 if unset.
func (m *Quantile) GetQuantile() float64 {
	if m != nil && m.Quantile != nil {
		return *m.Quantile
	}
	return 0
}

// GetValue returns the value at this quantile, or 0 if unset.
func (m *Quantile) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
|||
|
|||
// Summary mirrors the Summary message in metrics.proto: an observation count,
// a sum, and a set of quantiles. Generated code; edit comments only.
type Summary struct {
	SampleCount      *uint64     `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
	SampleSum        *float64    `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
	Quantile         []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
	XXX_unrecognized []byte      `json:"-"`
}

func (m *Summary) Reset()         { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage()    {}

// GetSampleCount returns the observation count, or 0 if unset.
func (m *Summary) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

// GetSampleSum returns the sum of observations, or 0 if unset.
func (m *Summary) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

// GetQuantile returns the quantile slice (nil if unset).
func (m *Summary) GetQuantile() []*Quantile {
	if m != nil {
		return m.Quantile
	}
	return nil
}
|||
|
|||
// Untyped mirrors the Untyped message in metrics.proto. Generated code; edit
// comments only.
type Untyped struct {
	Value            *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Untyped) Reset()         { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage()    {}

// GetValue returns the untyped value, or 0 if unset.
func (m *Untyped) GetValue() float64 {
	if m != nil && m.Value != nil {
		return *m.Value
	}
	return 0
}
|||
|
|||
// Histogram mirrors the Histogram message in metrics.proto: an observation
// count, a sum, and a set of buckets. Generated code; edit comments only.
type Histogram struct {
	SampleCount      *uint64   `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
	SampleSum        *float64  `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
	Bucket           []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
	XXX_unrecognized []byte    `json:"-"`
}

func (m *Histogram) Reset()         { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage()    {}

// GetSampleCount returns the observation count, or 0 if unset.
func (m *Histogram) GetSampleCount() uint64 {
	if m != nil && m.SampleCount != nil {
		return *m.SampleCount
	}
	return 0
}

// GetSampleSum returns the sum of observations, or 0 if unset.
func (m *Histogram) GetSampleSum() float64 {
	if m != nil && m.SampleSum != nil {
		return *m.SampleSum
	}
	return 0
}

// GetBucket returns the bucket slice (nil if unset).
func (m *Histogram) GetBucket() []*Bucket {
	if m != nil {
		return m.Bucket
	}
	return nil
}
|||
|
|||
// Bucket mirrors the Bucket message in metrics.proto: a cumulative count and
// its upper bound within a Histogram. Generated code; edit comments only.
type Bucket struct {
	CumulativeCount  *uint64  `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
	UpperBound       *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
	XXX_unrecognized []byte   `json:"-"`
}

func (m *Bucket) Reset()         { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage()    {}

// GetCumulativeCount returns the cumulative count, or 0 if unset.
func (m *Bucket) GetCumulativeCount() uint64 {
	if m != nil && m.CumulativeCount != nil {
		return *m.CumulativeCount
	}
	return 0
}

// GetUpperBound returns the bucket's upper bound, or 0 if unset.
func (m *Bucket) GetUpperBound() float64 {
	if m != nil && m.UpperBound != nil {
		return *m.UpperBound
	}
	return 0
}
|||
|
|||
// Metric mirrors the Metric message in metrics.proto: a label set plus exactly
// one of the value-type sub-messages, and an optional timestamp. Generated
// code; edit comments only.
type Metric struct {
	Label            []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
	Gauge            *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
	Counter          *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
	Summary          *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
	Untyped          *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
	Histogram        *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
	TimestampMs      *int64       `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
	XXX_unrecognized []byte       `json:"-"`
}

func (m *Metric) Reset()         { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage()    {}

// GetLabel returns the label slice (nil if unset).
func (m *Metric) GetLabel() []*LabelPair {
	if m != nil {
		return m.Label
	}
	return nil
}

// GetGauge returns the Gauge sub-message (nil if unset).
func (m *Metric) GetGauge() *Gauge {
	if m != nil {
		return m.Gauge
	}
	return nil
}

// GetCounter returns the Counter sub-message (nil if unset).
func (m *Metric) GetCounter() *Counter {
	if m != nil {
		return m.Counter
	}
	return nil
}

// GetSummary returns the Summary sub-message (nil if unset).
func (m *Metric) GetSummary() *Summary {
	if m != nil {
		return m.Summary
	}
	return nil
}

// GetUntyped returns the Untyped sub-message (nil if unset).
func (m *Metric) GetUntyped() *Untyped {
	if m != nil {
		return m.Untyped
	}
	return nil
}

// GetHistogram returns the Histogram sub-message (nil if unset).
func (m *Metric) GetHistogram() *Histogram {
	if m != nil {
		return m.Histogram
	}
	return nil
}

// GetTimestampMs returns the timestamp in milliseconds, or 0 if unset.
func (m *Metric) GetTimestampMs() int64 {
	if m != nil && m.TimestampMs != nil {
		return *m.TimestampMs
	}
	return 0
}
|||
|
|||
type MetricFamily struct { |
|||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` |
|||
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` |
|||
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` |
|||
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` |
|||
XXX_unrecognized []byte `json:"-"` |
|||
} |
|||
|
|||
func (m *MetricFamily) Reset() { *m = MetricFamily{} } |
|||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) } |
|||
func (*MetricFamily) ProtoMessage() {} |
|||
|
|||
func (m *MetricFamily) GetName() string { |
|||
if m != nil && m.Name != nil { |
|||
return *m.Name |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *MetricFamily) GetHelp() string { |
|||
if m != nil && m.Help != nil { |
|||
return *m.Help |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *MetricFamily) GetType() MetricType { |
|||
if m != nil && m.Type != nil { |
|||
return *m.Type |
|||
} |
|||
return MetricType_COUNTER |
|||
} |
|||
|
|||
func (m *MetricFamily) GetMetric() []*Metric { |
|||
if m != nil { |
|||
return m.Metric |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func init() { |
|||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) |
|||
} |
|||
@ -0,0 +1,167 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package expfmt |
|||
|
|||
import ( |
|||
"bytes" |
|||
"compress/gzip" |
|||
"io" |
|||
"io/ioutil" |
|||
"testing" |
|||
|
|||
"github.com/matttproud/golang_protobuf_extensions/pbutil" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
var parser TextParser |
|||
|
|||
// Benchmarks to show how much penalty text format parsing actually inflicts.
|
|||
//
|
|||
// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
|
|||
//
|
|||
// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
|
|||
// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
|
|||
// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
|
|||
// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
|
|||
// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
|
|||
//
|
|||
// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
|
|||
// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
|
|||
// the difference becomes less relevant, only ~4x.
|
|||
//
|
|||
// The test data contains 248 samples.
|
|||
|
|||
// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
|
|||
// family DTOs.
|
|||
func BenchmarkParseText(b *testing.B) { |
|||
b.StopTimer() |
|||
data, err := ioutil.ReadFile("testdata/text") |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
b.StartTimer() |
|||
|
|||
for i := 0; i < b.N; i++ { |
|||
if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
|
|||
// into metric family DTOs.
|
|||
func BenchmarkParseTextGzip(b *testing.B) { |
|||
b.StopTimer() |
|||
data, err := ioutil.ReadFile("testdata/text.gz") |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
b.StartTimer() |
|||
|
|||
for i := 0; i < b.N; i++ { |
|||
in, err := gzip.NewReader(bytes.NewReader(data)) |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
if _, err := parser.TextToMetricFamilies(in); err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
} |
|||
} |
|||
|
|||
// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
|
|||
// metric family DTOs. Note that this does not build a map of metric families
|
|||
// (as the text version does), because it is not required for Prometheus
|
|||
// ingestion either. (However, it is required for the text-format parsing, as
|
|||
// the metric family might be sprinkled all over the text, while the
|
|||
// protobuf-format guarantees bundling at one place.)
|
|||
func BenchmarkParseProto(b *testing.B) { |
|||
b.StopTimer() |
|||
data, err := ioutil.ReadFile("testdata/protobuf") |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
b.StartTimer() |
|||
|
|||
for i := 0; i < b.N; i++ { |
|||
family := &dto.MetricFamily{} |
|||
in := bytes.NewReader(data) |
|||
for { |
|||
family.Reset() |
|||
if _, err := pbutil.ReadDelimited(in, family); err != nil { |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
b.Fatal(err) |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
|
|||
// protobuf format.
|
|||
func BenchmarkParseProtoGzip(b *testing.B) { |
|||
b.StopTimer() |
|||
data, err := ioutil.ReadFile("testdata/protobuf.gz") |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
b.StartTimer() |
|||
|
|||
for i := 0; i < b.N; i++ { |
|||
family := &dto.MetricFamily{} |
|||
in, err := gzip.NewReader(bytes.NewReader(data)) |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
for { |
|||
family.Reset() |
|||
if _, err := pbutil.ReadDelimited(in, family); err != nil { |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
b.Fatal(err) |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
|
|||
// metric family DTOs into a map. This is not happening during Prometheus
|
|||
// ingestion. It is just here to measure the overhead of that map creation and
|
|||
// separate it from the overhead of the text format parsing.
|
|||
func BenchmarkParseProtoMap(b *testing.B) { |
|||
b.StopTimer() |
|||
data, err := ioutil.ReadFile("testdata/protobuf") |
|||
if err != nil { |
|||
b.Fatal(err) |
|||
} |
|||
b.StartTimer() |
|||
|
|||
for i := 0; i < b.N; i++ { |
|||
families := map[string]*dto.MetricFamily{} |
|||
in := bytes.NewReader(data) |
|||
for { |
|||
family := &dto.MetricFamily{} |
|||
if _, err := pbutil.ReadDelimited(in, family); err != nil { |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
b.Fatal(err) |
|||
} |
|||
families[family.GetName()] = family |
|||
} |
|||
} |
|||
} |
|||
@ -0,0 +1,412 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package expfmt |
|||
|
|||
import ( |
|||
"fmt" |
|||
"io" |
|||
"math" |
|||
"mime" |
|||
"net/http" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
|
|||
"github.com/matttproud/golang_protobuf_extensions/pbutil" |
|||
"github.com/prometheus/common/model" |
|||
) |
|||
|
|||
// Decoder types decode an input stream into metric families.
|
|||
type Decoder interface { |
|||
Decode(*dto.MetricFamily) error |
|||
} |
|||
|
|||
type DecodeOptions struct { |
|||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
|||
Timestamp model.Time |
|||
} |
|||
|
|||
// ResponseFormat extracts the correct format from a HTTP response header.
|
|||
// If no matching format can be found FormatUnknown is returned.
|
|||
func ResponseFormat(h http.Header) Format { |
|||
ct := h.Get(hdrContentType) |
|||
|
|||
mediatype, params, err := mime.ParseMediaType(ct) |
|||
if err != nil { |
|||
return FmtUnknown |
|||
} |
|||
|
|||
const textType = "text/plain" |
|||
|
|||
switch mediatype { |
|||
case ProtoType: |
|||
if p, ok := params["proto"]; ok && p != ProtoProtocol { |
|||
return FmtUnknown |
|||
} |
|||
if e, ok := params["encoding"]; ok && e != "delimited" { |
|||
return FmtUnknown |
|||
} |
|||
return FmtProtoDelim |
|||
|
|||
case textType: |
|||
if v, ok := params["version"]; ok && v != TextVersion { |
|||
return FmtUnknown |
|||
} |
|||
return FmtText |
|||
} |
|||
|
|||
return FmtUnknown |
|||
} |
|||
|
|||
// NewDecoder returns a new decoder based on the given input format.
|
|||
// If the input format does not imply otherwise, a text format decoder is returned.
|
|||
func NewDecoder(r io.Reader, format Format) Decoder { |
|||
switch format { |
|||
case FmtProtoDelim: |
|||
return &protoDecoder{r: r} |
|||
} |
|||
return &textDecoder{r: r} |
|||
} |
|||
|
|||
// protoDecoder implements the Decoder interface for protocol buffers.
|
|||
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
	r io.Reader // stream of length-delimited MetricFamily messages
}
|||
|
|||
// Decode implements the Decoder interface.
|
|||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error { |
|||
_, err := pbutil.ReadDelimited(d.r, v) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) { |
|||
return fmt.Errorf("invalid metric name %q", v.GetName()) |
|||
} |
|||
for _, m := range v.GetMetric() { |
|||
if m == nil { |
|||
continue |
|||
} |
|||
for _, l := range m.GetLabel() { |
|||
if l == nil { |
|||
continue |
|||
} |
|||
if !model.LabelValue(l.GetValue()).IsValid() { |
|||
return fmt.Errorf("invalid label value %q", l.GetValue()) |
|||
} |
|||
if !model.LabelName(l.GetName()).IsValid() { |
|||
return fmt.Errorf("invalid label name %q", l.GetName()) |
|||
} |
|||
} |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
// textDecoder implements the Decoder interface for the text protocol.
|
|||
type textDecoder struct { |
|||
r io.Reader |
|||
p TextParser |
|||
fams []*dto.MetricFamily |
|||
} |
|||
|
|||
// Decode implements the Decoder interface.
|
|||
func (d *textDecoder) Decode(v *dto.MetricFamily) error { |
|||
// TODO(fabxc): Wrap this as a line reader to make streaming safer.
|
|||
if len(d.fams) == 0 { |
|||
// No cached metric families, read everything and parse metrics.
|
|||
fams, err := d.p.TextToMetricFamilies(d.r) |
|||
if err != nil { |
|||
return err |
|||
} |
|||
if len(fams) == 0 { |
|||
return io.EOF |
|||
} |
|||
d.fams = make([]*dto.MetricFamily, 0, len(fams)) |
|||
for _, f := range fams { |
|||
d.fams = append(d.fams, f) |
|||
} |
|||
} |
|||
|
|||
*v = *d.fams[0] |
|||
d.fams = d.fams[1:] |
|||
|
|||
return nil |
|||
} |
|||
|
|||
type SampleDecoder struct { |
|||
Dec Decoder |
|||
Opts *DecodeOptions |
|||
|
|||
f dto.MetricFamily |
|||
} |
|||
|
|||
func (sd *SampleDecoder) Decode(s *model.Vector) error { |
|||
if err := sd.Dec.Decode(&sd.f); err != nil { |
|||
return err |
|||
} |
|||
*s = extractSamples(&sd.f, sd.Opts) |
|||
return nil |
|||
} |
|||
|
|||
// Extract samples builds a slice of samples from the provided metric families.
|
|||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { |
|||
var all model.Vector |
|||
for _, f := range fams { |
|||
all = append(all, extractSamples(f, o)...) |
|||
} |
|||
return all |
|||
} |
|||
|
|||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { |
|||
switch f.GetType() { |
|||
case dto.MetricType_COUNTER: |
|||
return extractCounter(o, f) |
|||
case dto.MetricType_GAUGE: |
|||
return extractGauge(o, f) |
|||
case dto.MetricType_SUMMARY: |
|||
return extractSummary(o, f) |
|||
case dto.MetricType_UNTYPED: |
|||
return extractUntyped(o, f) |
|||
case dto.MetricType_HISTOGRAM: |
|||
return extractHistogram(o, f) |
|||
} |
|||
panic("expfmt.extractSamples: unknown metric family type") |
|||
} |
|||
|
|||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { |
|||
samples := make(model.Vector, 0, len(f.Metric)) |
|||
|
|||
for _, m := range f.Metric { |
|||
if m.Counter == nil { |
|||
continue |
|||
} |
|||
|
|||
lset := make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) |
|||
|
|||
smpl := &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Counter.GetValue()), |
|||
} |
|||
|
|||
if m.TimestampMs != nil { |
|||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) |
|||
} else { |
|||
smpl.Timestamp = o.Timestamp |
|||
} |
|||
|
|||
samples = append(samples, smpl) |
|||
} |
|||
|
|||
return samples |
|||
} |
|||
|
|||
func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { |
|||
samples := make(model.Vector, 0, len(f.Metric)) |
|||
|
|||
for _, m := range f.Metric { |
|||
if m.Gauge == nil { |
|||
continue |
|||
} |
|||
|
|||
lset := make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) |
|||
|
|||
smpl := &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Gauge.GetValue()), |
|||
} |
|||
|
|||
if m.TimestampMs != nil { |
|||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) |
|||
} else { |
|||
smpl.Timestamp = o.Timestamp |
|||
} |
|||
|
|||
samples = append(samples, smpl) |
|||
} |
|||
|
|||
return samples |
|||
} |
|||
|
|||
func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { |
|||
samples := make(model.Vector, 0, len(f.Metric)) |
|||
|
|||
for _, m := range f.Metric { |
|||
if m.Untyped == nil { |
|||
continue |
|||
} |
|||
|
|||
lset := make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) |
|||
|
|||
smpl := &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Untyped.GetValue()), |
|||
} |
|||
|
|||
if m.TimestampMs != nil { |
|||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) |
|||
} else { |
|||
smpl.Timestamp = o.Timestamp |
|||
} |
|||
|
|||
samples = append(samples, smpl) |
|||
} |
|||
|
|||
return samples |
|||
} |
|||
|
|||
func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { |
|||
samples := make(model.Vector, 0, len(f.Metric)) |
|||
|
|||
for _, m := range f.Metric { |
|||
if m.Summary == nil { |
|||
continue |
|||
} |
|||
|
|||
timestamp := o.Timestamp |
|||
if m.TimestampMs != nil { |
|||
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) |
|||
} |
|||
|
|||
for _, q := range m.Summary.Quantile { |
|||
lset := make(model.LabelSet, len(m.Label)+2) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
// BUG(matt): Update other names to "quantile".
|
|||
lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(q.GetValue()), |
|||
Timestamp: timestamp, |
|||
}) |
|||
} |
|||
|
|||
lset := make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Summary.GetSampleSum()), |
|||
Timestamp: timestamp, |
|||
}) |
|||
|
|||
lset = make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Summary.GetSampleCount()), |
|||
Timestamp: timestamp, |
|||
}) |
|||
} |
|||
|
|||
return samples |
|||
} |
|||
|
|||
func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { |
|||
samples := make(model.Vector, 0, len(f.Metric)) |
|||
|
|||
for _, m := range f.Metric { |
|||
if m.Histogram == nil { |
|||
continue |
|||
} |
|||
|
|||
timestamp := o.Timestamp |
|||
if m.TimestampMs != nil { |
|||
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) |
|||
} |
|||
|
|||
infSeen := false |
|||
|
|||
for _, q := range m.Histogram.Bucket { |
|||
lset := make(model.LabelSet, len(m.Label)+2) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") |
|||
|
|||
if math.IsInf(q.GetUpperBound(), +1) { |
|||
infSeen = true |
|||
} |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(q.GetCumulativeCount()), |
|||
Timestamp: timestamp, |
|||
}) |
|||
} |
|||
|
|||
lset := make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Histogram.GetSampleSum()), |
|||
Timestamp: timestamp, |
|||
}) |
|||
|
|||
lset = make(model.LabelSet, len(m.Label)+1) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") |
|||
|
|||
count := &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: model.SampleValue(m.Histogram.GetSampleCount()), |
|||
Timestamp: timestamp, |
|||
} |
|||
samples = append(samples, count) |
|||
|
|||
if !infSeen { |
|||
// Append an infinity bucket sample.
|
|||
lset := make(model.LabelSet, len(m.Label)+2) |
|||
for _, p := range m.Label { |
|||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) |
|||
} |
|||
lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") |
|||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") |
|||
|
|||
samples = append(samples, &model.Sample{ |
|||
Metric: model.Metric(lset), |
|||
Value: count.Value, |
|||
Timestamp: timestamp, |
|||
}) |
|||
} |
|||
} |
|||
|
|||
return samples |
|||
} |
|||
@ -0,0 +1,367 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package expfmt |
|||
|
|||
import ( |
|||
"io" |
|||
"net/http" |
|||
"reflect" |
|||
"sort" |
|||
"strings" |
|||
"testing" |
|||
|
|||
"github.com/prometheus/common/model" |
|||
) |
|||
|
|||
func TestTextDecoder(t *testing.T) { |
|||
var ( |
|||
ts = model.Now() |
|||
in = ` |
|||
# Only a quite simple scenario with two metric families. |
|||
# More complicated tests of the parser itself can be found in the text package. |
|||
# TYPE mf2 counter |
|||
mf2 3 |
|||
mf1{label="value1"} -3.14 123456 |
|||
mf1{label="value2"} 42 |
|||
mf2 4 |
|||
` |
|||
out = model.Vector{ |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "mf1", |
|||
"label": "value1", |
|||
}, |
|||
Value: -3.14, |
|||
Timestamp: 123456, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "mf1", |
|||
"label": "value2", |
|||
}, |
|||
Value: 42, |
|||
Timestamp: ts, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "mf2", |
|||
}, |
|||
Value: 3, |
|||
Timestamp: ts, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "mf2", |
|||
}, |
|||
Value: 4, |
|||
Timestamp: ts, |
|||
}, |
|||
} |
|||
) |
|||
|
|||
dec := &SampleDecoder{ |
|||
Dec: &textDecoder{r: strings.NewReader(in)}, |
|||
Opts: &DecodeOptions{ |
|||
Timestamp: ts, |
|||
}, |
|||
} |
|||
var all model.Vector |
|||
for { |
|||
var smpls model.Vector |
|||
err := dec.Decode(&smpls) |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
all = append(all, smpls...) |
|||
} |
|||
sort.Sort(all) |
|||
sort.Sort(out) |
|||
if !reflect.DeepEqual(all, out) { |
|||
t.Fatalf("output does not match") |
|||
} |
|||
} |
|||
|
|||
func TestProtoDecoder(t *testing.T) { |
|||
|
|||
var testTime = model.Now() |
|||
|
|||
scenarios := []struct { |
|||
in string |
|||
expected model.Vector |
|||
fail bool |
|||
}{ |
|||
{ |
|||
in: "", |
|||
}, |
|||
{ |
|||
in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", |
|||
fail: true, |
|||
}, |
|||
{ |
|||
in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", |
|||
expected: model.Vector{ |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
"some_label_name": "some_label_value", |
|||
}, |
|||
Value: -42, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
"another_label_name": "another_label_value", |
|||
}, |
|||
Value: 84, |
|||
Timestamp: testTime, |
|||
}, |
|||
}, |
|||
}, |
|||
{ |
|||
in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", |
|||
expected: model.Vector{ |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count_count", |
|||
"some_label_name": "some_label_value", |
|||
}, |
|||
Value: 0, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count_sum", |
|||
"some_label_name": "some_label_value", |
|||
}, |
|||
Value: 0, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
"some_label_name": "some_label_value", |
|||
"quantile": "0.99", |
|||
}, |
|||
Value: -42, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
"some_label_name": "some_label_value", |
|||
"quantile": "0.999", |
|||
}, |
|||
Value: -84, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count_count", |
|||
"another_label_name": "another_label_value", |
|||
}, |
|||
Value: 0, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count_sum", |
|||
"another_label_name": "another_label_value", |
|||
}, |
|||
Value: 0, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
"another_label_name": "another_label_value", |
|||
"quantile": "0.5", |
|||
}, |
|||
Value: 10, |
|||
Timestamp: testTime, |
|||
}, |
|||
}, |
|||
}, |
|||
{ |
|||
in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", |
|||
expected: model.Vector{ |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_bucket", |
|||
"le": "100", |
|||
}, |
|||
Value: 123, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_bucket", |
|||
"le": "120", |
|||
}, |
|||
Value: 412, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_bucket", |
|||
"le": "144", |
|||
}, |
|||
Value: 592, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_bucket", |
|||
"le": "172.8", |
|||
}, |
|||
Value: 1524, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_bucket", |
|||
"le": "+Inf", |
|||
}, |
|||
Value: 2693, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_sum", |
|||
}, |
|||
Value: 1756047.3, |
|||
Timestamp: testTime, |
|||
}, |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_duration_microseconds_count", |
|||
}, |
|||
Value: 2693, |
|||
Timestamp: testTime, |
|||
}, |
|||
}, |
|||
}, |
|||
{ |
|||
// The metric type is unset in this protobuf, which needs to be handled
|
|||
// correctly by the decoder.
|
|||
in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?", |
|||
expected: model.Vector{ |
|||
&model.Sample{ |
|||
Metric: model.Metric{ |
|||
model.MetricNameLabel: "request_count", |
|||
}, |
|||
Value: 1, |
|||
Timestamp: testTime, |
|||
}, |
|||
}, |
|||
}, |
|||
} |
|||
|
|||
for i, scenario := range scenarios { |
|||
dec := &SampleDecoder{ |
|||
Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, |
|||
Opts: &DecodeOptions{ |
|||
Timestamp: testTime, |
|||
}, |
|||
} |
|||
|
|||
var all model.Vector |
|||
for { |
|||
var smpls model.Vector |
|||
err := dec.Decode(&smpls) |
|||
if err == io.EOF { |
|||
break |
|||
} |
|||
if scenario.fail { |
|||
if err == nil { |
|||
t.Fatal("Expected error but got none") |
|||
} |
|||
break |
|||
} |
|||
if err != nil { |
|||
t.Fatal(err) |
|||
} |
|||
all = append(all, smpls...) |
|||
} |
|||
sort.Sort(all) |
|||
sort.Sort(scenario.expected) |
|||
if !reflect.DeepEqual(all, scenario.expected) { |
|||
t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func testDiscriminatorHTTPHeader(t testing.TB) { |
|||
var scenarios = []struct { |
|||
input map[string]string |
|||
output Format |
|||
err error |
|||
}{ |
|||
{ |
|||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, |
|||
output: FmtProtoDelim, |
|||
}, |
|||
{ |
|||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, |
|||
output: FmtUnknown, |
|||
}, |
|||
{ |
|||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, |
|||
output: FmtUnknown, |
|||
}, |
|||
{ |
|||
input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, |
|||
output: FmtText, |
|||
}, |
|||
{ |
|||
input: map[string]string{"Content-Type": `text/plain`}, |
|||
output: FmtText, |
|||
}, |
|||
{ |
|||
input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, |
|||
output: FmtUnknown, |
|||
}, |
|||
} |
|||
|
|||
for i, scenario := range scenarios { |
|||
var header http.Header |
|||
|
|||
if len(scenario.input) > 0 { |
|||
header = http.Header{} |
|||
} |
|||
|
|||
for key, value := range scenario.input { |
|||
header.Add(key, value) |
|||
} |
|||
|
|||
actual := ResponseFormat(header) |
|||
|
|||
if scenario.output != actual { |
|||
t.Errorf("%d. expected %s, got %s", i, scenario.output, actual) |
|||
} |
|||
} |
|||
} |
|||
|
|||
func TestDiscriminatorHTTPHeader(t *testing.T) { |
|||
testDiscriminatorHTTPHeader(t) |
|||
} |
|||
|
|||
func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { |
|||
for i := 0; i < b.N; i++ { |
|||
testDiscriminatorHTTPHeader(b) |
|||
} |
|||
} |
|||
@ -0,0 +1,88 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
package expfmt |
|||
|
|||
import ( |
|||
"fmt" |
|||
"io" |
|||
"net/http" |
|||
|
|||
"github.com/golang/protobuf/proto" |
|||
"github.com/matttproud/golang_protobuf_extensions/pbutil" |
|||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" |
|||
|
|||
dto "github.com/prometheus/client_model/go" |
|||
) |
|||
|
|||
// Encoder types encode metric families into an underlying wire protocol.
|
|||
type Encoder interface { |
|||
Encode(*dto.MetricFamily) error |
|||
} |
|||
|
|||
type encoder func(*dto.MetricFamily) error |
|||
|
|||
func (e encoder) Encode(v *dto.MetricFamily) error { |
|||
return e(v) |
|||
} |
|||
|
|||
// Negotiate returns the Content-Type based on the given Accept header.
|
|||
// If no appropriate accepted type is found, FmtText is returned.
|
|||
func Negotiate(h http.Header) Format { |
|||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { |
|||
// Check for protocol buffer
|
|||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { |
|||
switch ac.Params["encoding"] { |
|||
case "delimited": |
|||
return FmtProtoDelim |
|||
case "text": |
|||
return FmtProtoText |
|||
case "compact-text": |
|||
return FmtProtoCompact |
|||
} |
|||
} |
|||
// Check for text format.
|
|||
ver := ac.Params["version"] |
|||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { |
|||
return FmtText |
|||
} |
|||
} |
|||
return FmtText |
|||
} |
|||
|
|||
// NewEncoder returns a new encoder based on content type negotiation.
|
|||
func NewEncoder(w io.Writer, format Format) Encoder { |
|||
switch format { |
|||
case FmtProtoDelim: |
|||
return encoder(func(v *dto.MetricFamily) error { |
|||
_, err := pbutil.WriteDelimited(w, v) |
|||
return err |
|||
}) |
|||
case FmtProtoCompact: |
|||
return encoder(func(v *dto.MetricFamily) error { |
|||
_, err := fmt.Fprintln(w, v.String()) |
|||
return err |
|||
}) |
|||
case FmtProtoText: |
|||
return encoder(func(v *dto.MetricFamily) error { |
|||
_, err := fmt.Fprintln(w, proto.MarshalTextString(v)) |
|||
return err |
|||
}) |
|||
case FmtText: |
|||
return encoder(func(v *dto.MetricFamily) error { |
|||
_, err := MetricFamilyToText(w, v) |
|||
return err |
|||
}) |
|||
} |
|||
panic("expfmt.NewEncoder: unknown format") |
|||
} |
|||
@ -0,0 +1,37 @@ |
|||
// Copyright 2015 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// A package for reading and writing Prometheus metrics.
|
|||
package expfmt |
|||
|
|||
// Format represents the Content-Type string of one of the supported
// wire protocols for metric exposition.
type Format string

const (
	// TextVersion is the version of the text exposition format.
	TextVersion = "0.0.4"

	// ProtoType and ProtoProtocol identify the protobuf-based exposition
	// format; ProtoFmt is their combined Content-Type prefix.
	ProtoType     = `application/vnd.google.protobuf`
	ProtoProtocol = `io.prometheus.client.MetricFamily`
	ProtoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"

	// The Content-Type values for the different wire protocols.
	FmtUnknown      Format = `<unknown>`
	FmtText         Format = `text/plain; version=` + TextVersion
	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText    Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
)

// Standard HTTP header names used during content-type negotiation.
const (
	hdrContentType = "Content-Type"
	hdrAccept      = "Accept"
)
|||
@ -0,0 +1,36 @@ |
|||
// Copyright 2014 The Prometheus Authors
|
|||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|||
// you may not use this file except in compliance with the License.
|
|||
// You may obtain a copy of the License at
|
|||
//
|
|||
// http://www.apache.org/licenses/LICENSE-2.0
|
|||
//
|
|||
// Unless required by applicable law or agreed to in writing, software
|
|||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|||
// See the License for the specific language governing permissions and
|
|||
// limitations under the License.
|
|||
|
|||
// Build only when actually fuzzing
|
|||
// +build gofuzz
|
|||
|
|||
package expfmt |
|||
|
|||
import "bytes" |
|||
|
|||
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
|
|||
//
|
|||
// go-fuzz-build github.com/prometheus/common/expfmt
|
|||
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
|
|||
//
|
|||
// Further input samples should go in the folder fuzz/corpus.
|
|||
func Fuzz(in []byte) int { |
|||
parser := TextParser{} |
|||
_, err := parser.TextToMetricFamilies(bytes.NewReader(in)) |
|||
|
|||
if err != nil { |
|||
return 0 |
|||
} |
|||
|
|||
return 1 |
|||
} |
|||
Some files were not shown because too many files changed in this diff
Write
Preview
Loading…
Cancel
Save
Reference in new issue