diff --git a/vendor/manifest b/vendor/manifest
index 9a0a4d78..3be82729 100644
--- a/vendor/manifest
+++ b/vendor/manifest
@@ -7,6 +7,19 @@
"revision": "574d3147eee384229bf96a5d12c207fe7b5234f3",
"branch": "master"
},
+ {
+ "importpath": "github.com/Sirupsen/logrus",
+ "repository": "https://github.com/Sirupsen/logrus",
+ "revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+ "branch": "master"
+ },
+ {
+ "importpath": "github.com/beorn7/perks/quantile",
+ "repository": "https://github.com/beorn7/perks",
+ "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9",
+ "branch": "master",
+ "path": "/quantile"
+ },
{
"importpath": "github.com/davecgh/go-spew/spew",
"repository": "https://github.com/davecgh/go-spew",
@@ -33,6 +46,13 @@
"revision": "44cc805cf13205b55f69e14bcb69867d1ae92f98",
"branch": "master"
},
+ {
+ "importpath": "github.com/golang/protobuf/proto",
+ "repository": "https://github.com/golang/protobuf",
+ "revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
+ "branch": "master",
+ "path": "/proto"
+ },
{
"importpath": "github.com/golang/snappy",
"repository": "https://github.com/golang/snappy",
@@ -51,6 +71,19 @@
"revision": "a6657b2386e9b8be76484c08711b02c7cf867ead",
"branch": "master"
},
+ {
+ "importpath": "github.com/matrix-org/util",
+ "repository": "https://github.com/matrix-org/util",
+ "revision": "2aeb7e5d047ec74d65353f1579990a1e90af5bb0",
+ "branch": "master"
+ },
+ {
+ "importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "repository": "https://github.com/matttproud/golang_protobuf_extensions",
+ "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
+ "branch": "master",
+ "path": "/pbutil"
+ },
{
"importpath": "github.com/pierrec/lz4",
"repository": "https://github.com/pierrec/lz4",
@@ -64,17 +97,82 @@
"branch": "master",
"path": "/xxHash32"
},
+ {
+ "importpath": "github.com/prometheus/client_golang",
+ "repository": "https://github.com/prometheus/client_golang",
+ "revision": "c317fb74746eac4fc65fe3909195f4cf67c5562a",
+ "branch": "master"
+ },
+ {
+ "importpath": "github.com/prometheus/client_model/go",
+ "repository": "https://github.com/prometheus/client_model",
+ "revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6",
+ "branch": "master",
+ "path": "/go"
+ },
+ {
+ "importpath": "github.com/prometheus/common/expfmt",
+ "repository": "https://github.com/prometheus/common",
+ "revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
+ "branch": "master",
+ "path": "/expfmt"
+ },
+ {
+ "importpath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+ "repository": "https://github.com/prometheus/common",
+ "revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
+ "branch": "master",
+ "path": "/internal/bitbucket.org/ww/goautoneg"
+ },
+ {
+ "importpath": "github.com/prometheus/common/model",
+ "repository": "https://github.com/prometheus/common",
+ "revision": "dd2f054febf4a6c00f2343686efb775948a8bff4",
+ "branch": "master",
+ "path": "/model"
+ },
+ {
+ "importpath": "github.com/prometheus/procfs",
+ "repository": "https://github.com/prometheus/procfs",
+ "revision": "1878d9fbb537119d24b21ca07effd591627cd160",
+ "branch": "master"
+ },
{
"importpath": "github.com/rcrowley/go-metrics",
"repository": "https://github.com/rcrowley/go-metrics",
"revision": "1f30fe9094a513ce4c700b9a54458bbb0c96996c",
"branch": "master"
},
+ {
+ "importpath": "github.com/sirupsen/logrus",
+ "repository": "https://github.com/sirupsen/logrus",
+ "revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+ "branch": "master"
+ },
+ {
+ "importpath": "golang.org/x/net/context",
+ "repository": "https://go.googlesource.com/net",
+ "revision": "007e530097ad7f954752df63046b4036f98ba6a6",
+ "branch": "master",
+ "path": "/context"
+ },
{
"importpath": "gopkg.in/Shopify/sarama.v1",
"repository": "https://gopkg.in/Shopify/sarama.v1",
"revision": "0fb560e5f7fbcaee2f75e3c34174320709f69944",
"branch": "master"
+ },
+ {
+ "importpath": "gopkg.in/airbrake/gobrake.v2",
+ "repository": "https://gopkg.in/airbrake/gobrake.v2",
+ "revision": "668876711219e8b0206e2994bf0a59d889c775aa",
+ "branch": "master"
+ },
+ {
+ "importpath": "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
+ "repository": "https://gopkg.in/gemnasium/logrus-airbrake-hook.v2",
+ "revision": "bfee1239d796830ca346767650cce5ba90d58c57",
+ "branch": "master"
}
]
-}
\ No newline at end of file
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000..f2c2bc21
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/src/github.com/Sirupsen/logrus/LICENSE b/vendor/src/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 00000000..f090cb42
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 00000000..206c746c
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr, could also be a file.
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different from the package-level
+  // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
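+A hypothetical sketch contrasting the two styles (the `event` and `err`
+variables are assumptions):
+
+```go
+// printf-style: the values are baked into one opaque string.
+log.Errorf("failed to send event %s: %v", event, err)
+
+// field-style: the same information, kept queryable by log tooling.
+log.WithField("event", event).WithError(err).Error("failed to send event")
+```
+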
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+    "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: The syslog hook also supports connecting to the local syslog daemon (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to an AMQP broker (like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
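+A minimal sketch, assuming a `DEBUG` environment variable is how your
+application signals verbosity:
+
+```go
+if os.Getenv("DEBUG") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```
+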
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
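+For example, a bare `log.Info("Failed to send event.")` with the default text
+formatter yields something like:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=info msg="Failed to send event."
+```
+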
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force non-colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// Register it, e.g. from init or main:
+// log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hooks and formatter from a config file, and loggers are generated with different configs for different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes; this mutex is held while calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or calling the hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
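+
+A minimal sketch of the `O_APPEND` case (the file path and the `logger`
+variable are assumptions):
+
+```go
+f, err := os.OpenFile("/var/log/myapp.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+if err != nil {
+  logger.Fatal(err)
+}
+logger.Out = f     // small appends to an O_APPEND file are effectively atomic
+logger.SetNoLock() // safe only under the conditions listed above
+```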
diff --git a/vendor/src/github.com/Sirupsen/logrus/alt_exit.go b/vendor/src/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 00000000..b4c9e847
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
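+//
+// Note: registration is not synchronized; register handlers during program
+// startup, before logging from multiple goroutines begins.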
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go
new file mode 100644
index 00000000..022b7783
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "io/ioutil"
+ "os/exec"
+ "testing"
+ "time"
+)
+
+func TestRegister(t *testing.T) {
+ current := len(handlers)
+ RegisterExitHandler(func() {})
+ if len(handlers) != current+1 {
+ t.Fatalf("can't add handler")
+ }
+}
+
+func TestHandler(t *testing.T) {
+ gofile := "/tmp/testprog.go"
+ if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
+ t.Fatalf("can't create go file")
+ }
+
+ outfile := "/tmp/testprog.out"
+ arg := time.Now().UTC().String()
+ err := exec.Command("go", "run", gofile, outfile, arg).Run()
+ if err == nil {
+ t.Fatalf("completed normally, should have failed")
+ }
+
+ data, err := ioutil.ReadFile(outfile)
+ if err != nil {
+ t.Fatalf("can't read output file %s", outfile)
+ }
+
+ if string(data) != arg {
+ t.Fatalf("bad data")
+ }
+}
+
+var testprog = []byte(`
+// Test program for atexit, gets output file and data as arguments and writes
+// data to output file in atexit handler.
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "flag"
+ "fmt"
+ "io/ioutil"
+)
+
+var outfile = ""
+var data = ""
+
+func handler() {
+ ioutil.WriteFile(outfile, []byte(data), 0666)
+}
+
+func badHandler() {
+ n := 0
+ fmt.Println(1/n)
+}
+
+func main() {
+ flag.Parse()
+ outfile = flag.Arg(0)
+ data = flag.Arg(1)
+
+ logrus.RegisterExitHandler(handler)
+ logrus.RegisterExitHandler(badHandler)
+ logrus.Fatal("Bye bye")
+}
+`)
diff --git a/vendor/src/github.com/Sirupsen/logrus/doc.go b/vendor/src/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 00000000..dddd5f87
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/src/github.com/Sirupsen/logrus/entry.go b/vendor/src/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 00000000..4edbe7a2
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to the entry
+ Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns the string representation of the entry, as produced by the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/entry_test.go b/vendor/src/github.com/Sirupsen/logrus/entry_test.go
new file mode 100644
index 00000000..99c3b41d
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/entry_test.go
@@ -0,0 +1,77 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryWithError(t *testing.T) {
+
+ assert := assert.New(t)
+
+ defer func() {
+ ErrorKey = "error"
+ }()
+
+ err := fmt.Errorf("kaboom at layer %d", 4711)
+
+ assert.Equal(err, WithError(err).Data["error"])
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+
+ assert.Equal(err, entry.WithError(err).Data["error"])
+
+ ErrorKey = "err"
+
+ assert.Equal(err, entry.WithError(err).Data["err"])
+
+}
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 00000000..a1623ec0
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Level = logrus.DebugLevel
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 00000000..3187f6d3
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
+}
+
+func main() {
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/exported.go b/vendor/src/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 00000000..9a0120ac
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+	// std is the package-level logger, analogous to the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 00000000..b5fbe934
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping them. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 00000000..c6d290c7
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+var errorFields = Fields{
+ "foo": fmt.Errorf("bar"),
+ "baz": fmt.Errorf("qux"),
+}
+
+func BenchmarkErrorTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hook_test.go b/vendor/src/github.com/Sirupsen/logrus/hook_test.go
new file mode 100644
index 00000000..13f34cb6
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks.go b/vendor/src/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 00000000..3f151cdc
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that hooks are not
+// fired in a goroutine or a channel with workers; you should handle such
+// queueing yourself if your hook may block and you don't wish for the logging
+// calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 00000000..066704b3
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,39 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+If you want to connect to the local syslog daemon (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following:
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
\ No newline at end of file
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 00000000..a36e2003
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,54 @@
+// +build !windows,!nacl,!plan9
+
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel, logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 00000000..42762dc1
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/test/test.go
new file mode 100644
index 00000000..06881253
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/test/test.go
@@ -0,0 +1,67 @@
+package test
+
+import (
+ "io/ioutil"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// test.Hook is a hook designed for dealing with logs in test scenarios.
+type Hook struct {
+ Entries []*logrus.Entry
+}
+
+// NewGlobal installs a test hook for the global logger.
+func NewGlobal() *Hook {
+ hook := new(Hook)
+ logrus.AddHook(hook)
+ return hook
+}
+
+// NewLocal installs a test hook for a given local logger.
+func NewLocal(logger *logrus.Logger) *Hook {
+ hook := new(Hook)
+ logger.Hooks.Add(hook)
+ return hook
+}
+
+// NewNullLogger creates a discarding logger and installs the test hook.
+func NewNullLogger() (*logrus.Logger, *Hook) {
+ logger := logrus.New()
+ logger.Out = ioutil.Discard
+ return logger, NewLocal(logger)
+}
+
+func (t *Hook) Fire(e *logrus.Entry) error {
+ t.Entries = append(t.Entries, e)
+ return nil
+}
+
+func (t *Hook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
+
+// LastEntry returns the last entry that was logged, or nil if there is none.
+func (t *Hook) LastEntry() *logrus.Entry {
+ if len(t.Entries) == 0 {
+ return nil
+ }
+ return t.Entries[len(t.Entries)-1]
+}
+
+// Reset removes all Entries from this test hook.
+func (t *Hook) Reset() {
+ t.Entries = make([]*logrus.Entry, 0)
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/src/github.com/Sirupsen/logrus/hooks/test/test_test.go
new file mode 100644
index 00000000..d69455ba
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/hooks/test/test_test.go
@@ -0,0 +1,39 @@
+package test
+
+import (
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAllHooks(t *testing.T) {
+
+ assert := assert.New(t)
+
+ logger, hook := NewNullLogger()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ logger.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+ logger.Warn("Hello warning")
+ assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
+ assert.Equal("Hello warning", hook.LastEntry().Message)
+ assert.Equal(2, len(hook.Entries))
+
+ hook.Reset()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ hook = NewGlobal()
+
+ logrus.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 00000000..266554e9
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for various fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+// FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter_test.go
new file mode 100644
index 00000000..51093a79
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/json_formatter_test.go
@@ -0,0 +1,199 @@
+package logrus
+
+import (
+ "encoding/json"
+ "errors"
+ "strings"
+ "testing"
+)
+
+func TestErrorNotLost(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["error"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["omg"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestFieldClashWithTime(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("time", "right now!"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.time"] != "right now!" {
+ t.Fatal("fields.time not set to original time field")
+ }
+
+ if entry["time"] != "0001-01-01T00:00:00Z" {
+ t.Fatal("time field not set to current time, was: ", entry["time"])
+ }
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("msg", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.msg"] != "something" {
+ t.Fatal("fields.msg not set to original msg field")
+ }
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.level"] != "something" {
+ t.Fatal("fields.level not set to original level field")
+ }
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ if b[len(b)-1] != '\n' {
+ t.Fatal("Expected JSON log entry to end with a newline")
+ }
+}
+
+func TestJSONMessageKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyMsg: "message",
+ },
+ }
+
+ b, err := formatter.Format(&Entry{Message: "oh hai"})
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
+ t.Fatal("Expected JSON to format message key")
+ }
+}
+
+func TestJSONLevelKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyLevel: "somelevel",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "somelevel") {
+ t.Fatal("Expected JSON to format level key")
+ }
+}
+
+func TestJSONTimeKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyTime: "timeywimey",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "timeywimey") {
+ t.Fatal("Expected JSON to format time key")
+ }
+}
+
+func TestJSONDisableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{
+ DisableTimestamp: true,
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if strings.Contains(s, FieldKeyTime) {
+ t.Error("Did not prevent timestamp", s)
+ }
+}
+
+func TestJSONEnableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, FieldKeyTime) {
+ t.Error("Timestamp not present", s)
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 00000000..b769f3d3
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it as the default, which is `os.Stderr`. You can also set
+ // this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before being logged to Out.
+ // The included formatters are `TextFormatter` and `JSONFormatter`;
+ // TextFormatter is the default. When a TTY is attached it logs with colors,
+ // but not when writing to a file. You can easily implement your own
+ // formatter by satisfying the `Formatter` interface; see the `README` or
+ // the included formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and
+ // defaults to) `logrus.Info`, which allows Info(), Warn(), Error() and
+ // Fatal() to be logged. `logrus.Debug` is useful in development
+ // environments where verbose output is desired.
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// New creates a new Logger. Configuration should be set by changing
+// `Formatter`, `Out` and `Hooks` directly on the returned instance. You can
+// also just instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// WithField adds a field to the log entry. Note that it doesn't log until
+// you call Debug, Print, Info, Warn, Fatal or Panic; it only creates a log
+// entry. If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// WithFields adds a map of fields to the log entry. All it does is call
+// `WithField` for each key/value pair.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// WithError adds an error as a single field to the log entry. All it does is
+// call `WithError` on a new entry for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// SetNoLock disables the logger's mutex. When a file is opened in append
+// mode, it is safe to write to it concurrently (for messages under 4k on
+// Linux), so in such cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/src/github.com/Sirupsen/logrus/logger_bench_test.go
new file mode 100644
index 00000000..dd23a353
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/logger_bench_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+ "os"
+ "testing"
+)
+
+// loggerFields is a small data set for benchmarking the logger.
+var loggerFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+func BenchmarkDummyLogger(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkDummyLoggerNoLock(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
+
+func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ logger.SetNoLock()
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus.go b/vendor/src/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 00000000..e5966911
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
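+
+// For example, to configure the level from an environment variable (a sketch;
+// the variable name is illustrative):
+//
+//  if lvl, err := ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
+//      SetLevel(lvl)
+//  }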
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should accept; that way it
+// can take either a stdlib logger or a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
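+
+// For example, a library function can accept either a *log.Logger or a
+// *logrus.Logger through this interface (a sketch; the function name is
+// illustrative):
+//
+//  func Process(items []string, log StdLogger) {
+//      log.Printf("processing %d items", len(items))
+//  }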
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus_test.go b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go
new file mode 100644
index 00000000..bfc47805
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/logrus_test.go
@@ -0,0 +1,361 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val := kvArr[1]
+ if kvArr[1][0] == '"' {
+ var err error
+ val, err = strconv.Unquote(val)
+ assert.NoError(t, err)
+ }
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ llog := logger.WithField("context", "eating raw fish")
+
+ llog.Info("looks delicious")
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded first message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "looks delicious")
+ assert.Equal(t, fields["context"], "eating raw fish")
+
+ buffer.Reset()
+
+ llog.Warn("omg it is!")
+
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded second message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "omg it is!")
+ assert.Equal(t, fields["context"], "eating raw fish")
+ assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("PANIC")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("FATAL")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("ERROR")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARN")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARNING")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("INFO")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("DEBUG")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
+
+func TestGetSetLevelRace(t *testing.T) {
+ wg := sync.WaitGroup{}
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ if i%2 == 0 {
+ SetLevel(InfoLevel)
+ } else {
+ GetLevel()
+ }
+ }(i)
+
+ }
+ wg.Wait()
+}
+
+func TestLoggingRace(t *testing.T) {
+ logger := New()
+
+ var wg sync.WaitGroup
+ wg.Add(100)
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ logger.Info("info")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+// Compile test
+func TestLogrusInterface(t *testing.T) {
+ var buffer bytes.Buffer
+ fn := func(l FieldLogger) {
+ b := l.WithField("key", "value")
+ b.Debug("Test")
+ }
+ // test logger
+ logger := New()
+ logger.Out = &buffer
+ fn(logger)
+
+ // test Entry
+ e := logger.WithField("another", "value")
+ fn(e)
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/src/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 00000000..1960169e
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal always returns true on App Engine, where the terminal cannot be inspected.
+func IsTerminal() bool {
+ return true
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 00000000..5f6be4d3
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 00000000..308160ca
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000..329038f6
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000..a3c6f6e7
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000..3727e8ad
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..076de5da
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
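+
+// For example, to log full timestamps with colors disabled (a sketch):
+//
+//  logger.Formatter = &TextFormatter{
+//      FullTimestamp: true,
+//      DisableColors: true,
+//  }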
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
+ case string:
+ if !needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ case error:
+ errmsg := value.Error()
+ if !needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%q", errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+}
diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 00000000..107703fa
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,71 @@
+package logrus
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+ "time"
+ "strings"
+)
+
+func TestQuoting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ checkQuoting := func(q bool, value interface{}) {
+ b, _ := tf.Format(WithField("test", value))
+ idx := bytes.Index(b, ([]byte)("test="))
+ cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ if cont != q {
+ if q {
+ t.Errorf("quoting expected for: %#v", value)
+ } else {
+ t.Errorf("quoting not expected for: %#v", value)
+ }
+ }
+ }
+
+ checkQuoting(false, "abcd")
+ checkQuoting(false, "v1.0")
+ checkQuoting(false, "1234567890")
+ checkQuoting(true, "/foobar")
+ checkQuoting(true, "x y")
+ checkQuoting(true, "x,y")
+ checkQuoting(false, errors.New("invalid"))
+ checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestTimestampFormat(t *testing.T) {
+ checkTimeStr := func(format string) {
+ customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+ customStr, _ := customFormatter.Format(WithField("test", "test"))
+ timeStart := bytes.Index(customStr, ([]byte)("time="))
+ timeEnd := bytes.Index(customStr, ([]byte)("level="))
+ timeStr := customStr[timeStart+5 : timeEnd-1]
+ if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
+ timeStr = timeStr[1 : len(timeStr)-1]
+ }
+ if format == "" {
+ format = time.RFC3339
+ }
+ _, e := time.Parse(format, (string)(timeStr))
+ if e != nil {
+ t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+ }
+ }
+
+ checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+ checkTimeStr("Mon Jan _2 15:04:05 2006")
+ checkTimeStr("")
+}
+
+func TestDisableTimestampWithColoredOutput(t *testing.T) {
+ tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
+
+ b, _ := tf.Format(WithField("test", "test"))
+ if strings.Contains(string(b), "[0000]") {
+ t.Error("timestamp not expected when DisableTimestamp is true")
+ }
+}
+
+// TODO add tests for sorting etc., this requires a parser for the text
+// formatter output.
diff --git a/vendor/src/github.com/Sirupsen/logrus/writer.go b/vendor/src/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 00000000..f74d2aa5
--- /dev/null
+++ b/vendor/src/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+ switch level {
+ case DebugLevel:
+ printFunc = logger.Debug
+ case InfoLevel:
+ printFunc = logger.Info
+ case WarnLevel:
+ printFunc = logger.Warn
+ case ErrorLevel:
+ printFunc = logger.Error
+ case FatalLevel:
+ printFunc = logger.Fatal
+ case PanicLevel:
+ printFunc = logger.Panic
+ default:
+ printFunc = logger.Print
+ }
+
+ go logger.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
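+
+// A common use of Writer is redirecting output from code that expects an
+// io.Writer, such as the standard library's log package (a sketch, assuming
+// the standard "log" package is imported as stdlog):
+//
+//  logger := New()
+//  w := logger.WriterLevel(WarnLevel)
+//  defer w.Close()
+//  stdlog.SetOutput(w) // stdlib log lines now appear as logrus warnings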
diff --git a/vendor/src/github.com/beorn7/perks/quantile/bench_test.go b/vendor/src/github.com/beorn7/perks/quantile/bench_test.go
new file mode 100644
index 00000000..0bd0e4e7
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/bench_test.go
@@ -0,0 +1,63 @@
+package quantile
+
+import (
+ "testing"
+)
+
+func BenchmarkInsertTargeted(b *testing.B) {
+ b.ReportAllocs()
+
+ s := NewTargeted(Targets)
+ b.ResetTimer()
+ for i := float64(0); i < float64(b.N); i++ {
+ s.Insert(i)
+ }
+}
+
+func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
+ s := NewTargeted(TargetsSmallEpsilon)
+ b.ResetTimer()
+ for i := float64(0); i < float64(b.N); i++ {
+ s.Insert(i)
+ }
+}
+
+func BenchmarkInsertBiased(b *testing.B) {
+ s := NewLowBiased(0.01)
+ b.ResetTimer()
+ for i := float64(0); i < float64(b.N); i++ {
+ s.Insert(i)
+ }
+}
+
+func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
+ s := NewLowBiased(0.0001)
+ b.ResetTimer()
+ for i := float64(0); i < float64(b.N); i++ {
+ s.Insert(i)
+ }
+}
+
+func BenchmarkQuery(b *testing.B) {
+ s := NewTargeted(Targets)
+ for i := float64(0); i < 1e6; i++ {
+ s.Insert(i)
+ }
+ b.ResetTimer()
+ n := float64(b.N)
+ for i := float64(0); i < n; i++ {
+ s.Query(i / n)
+ }
+}
+
+func BenchmarkQuerySmallEpsilon(b *testing.B) {
+ s := NewTargeted(TargetsSmallEpsilon)
+ for i := float64(0); i < 1e6; i++ {
+ s.Insert(i)
+ }
+ b.ResetTimer()
+ n := float64(b.N)
+ for i := float64(0); i < n; i++ {
+ s.Query(i / n)
+ }
+}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/example_test.go b/vendor/src/github.com/beorn7/perks/quantile/example_test.go
new file mode 100644
index 00000000..ab3293aa
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/example_test.go
@@ -0,0 +1,121 @@
+// +build go1.1
+
+package quantile_test
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+)
+
+func Example_simple() {
+ ch := make(chan float64)
+ go sendFloats(ch)
+
+ // Compute the 50th, 90th, and 99th percentile.
+ q := quantile.NewTargeted(map[float64]float64{
+ 0.50: 0.005,
+ 0.90: 0.001,
+ 0.99: 0.0001,
+ })
+ for v := range ch {
+ q.Insert(v)
+ }
+
+ fmt.Println("perc50:", q.Query(0.50))
+ fmt.Println("perc90:", q.Query(0.90))
+ fmt.Println("perc99:", q.Query(0.99))
+ fmt.Println("count:", q.Count())
+ // Output:
+ // perc50: 5
+ // perc90: 16
+ // perc99: 223
+ // count: 2388
+}
+
+func Example_mergeMultipleStreams() {
+ // Scenario:
+ // We have multiple database shards. On each shard, there is a process
+ // collecting query response times from the database logs and inserting
+ // them into a Stream (created via NewTargeted(0.90)), much like the
+ // Simple example. These processes expose a network interface for us to
+ // ask them to serialize and send us the results of their
+ // Stream.Samples so we may Merge and Query them.
+ //
+ // NOTES:
+ // * These sample sets are small, allowing us to get them
+ // across the network much faster than sending the entire list of data
+ // points.
+ //
+ // * For this to work correctly, we must supply the same quantiles
+ // a priori that the process collecting the samples supplied to
+ // NewTargeted, even if we do not plan to query them all here.
+ ch := make(chan quantile.Samples)
+ getDBQuerySamples(ch)
+ q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
+ for samples := range ch {
+ q.Merge(samples)
+ }
+ fmt.Println("perc90:", q.Query(0.90))
+}
+
+func Example_window() {
+ // Scenario: We want the 90th, 95th, and 99th percentiles for each
+ // minute.
+
+ ch := make(chan float64)
+ go sendStreamValues(ch)
+
+ tick := time.NewTicker(1 * time.Minute)
+ q := quantile.NewTargeted(map[float64]float64{
+ 0.90: 0.001,
+ 0.95: 0.0005,
+ 0.99: 0.0001,
+ })
+ for {
+ select {
+ case t := <-tick.C:
+ flushToDB(t, q.Samples())
+ q.Reset()
+ case v := <-ch:
+ q.Insert(v)
+ }
+ }
+}
+
+func sendStreamValues(ch chan float64) {
+ // Use your imagination
+}
+
+func flushToDB(t time.Time, samples quantile.Samples) {
+ // Use your imagination
+}
+
+// This is a stub for the above example. In reality this would hit the remote
+// servers via http or something like it.
+func getDBQuerySamples(ch chan quantile.Samples) {}
+
+func sendFloats(ch chan<- float64) {
+ f, err := os.Open("exampledata.txt")
+ if err != nil {
+ log.Fatal(err)
+ }
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ b := sc.Bytes()
+ v, err := strconv.ParseFloat(string(b), 64)
+ if err != nil {
+ log.Fatal(err)
+ }
+ ch <- v
+ }
+ if sc.Err() != nil {
+ log.Fatal(sc.Err())
+ }
+ close(ch)
+}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 00000000..1602287d
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream.go b/vendor/src/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 00000000..f4cabd66
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
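+//
+// In the terminology of the CKMS paper, this appears to correspond to the
+// tuple (v_i, g_i, Δ_i): Value is the observed value, Width the number of
+// observations the sample summarizes, and Delta the allowed error in the
+// sample's rank (a reading of the code, not upstream documentation).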
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
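+
+// To make the guarantee concrete (illustrative arithmetic): with
+// epsilon = 0.01 the invariant is ƒ(r) = 0.02*r, so a sample at rank 1000
+// may be off by roughly epsilon*r = 10 ranks; the permitted error shrinks
+// toward the low quantiles, which is what "low-biased" buys.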
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.99, 0.9, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
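+
+// A minimal usage sketch; "latencies" is a hypothetical []float64, the rest
+// is this package's API as defined in this file:
+//
+//	s := NewTargeted(map[float64]float64{0.50: 0.005, 0.99: 0.001})
+//	for _, v := range latencies {
+//		s.Insert(v)
+//	}
+//	p99 := s.Query(0.99) // true rank is within 0.99 ± 0.001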
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed value of the qth percentile. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i--
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
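+
+// Worked example of the fast path above: after inserting the five values
+// 1..5 and before any flush, Query(0.5) computes i = ceil(5*0.5)-1 = 2 and
+// returns the exact median, 3.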
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unit tests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
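+// compress walks the summary from the end, folding a sample into its
+// successor whenever their combined Width plus the successor's Delta still
+// satisfies the invariant ƒ; this is the COMPRESS step of the CKMS paper
+// (a reading of the code below, not upstream documentation).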
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi--
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream_test.go b/vendor/src/github.com/beorn7/perks/quantile/stream_test.go
new file mode 100644
index 00000000..85519509
--- /dev/null
+++ b/vendor/src/github.com/beorn7/perks/quantile/stream_test.go
@@ -0,0 +1,215 @@
+package quantile
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "testing"
+)
+
+var (
+ Targets = map[float64]float64{
+ 0.01: 0.001,
+ 0.10: 0.01,
+ 0.50: 0.05,
+ 0.90: 0.01,
+ 0.99: 0.001,
+ }
+ TargetsSmallEpsilon = map[float64]float64{
+ 0.01: 0.0001,
+ 0.10: 0.001,
+ 0.50: 0.005,
+ 0.90: 0.001,
+ 0.99: 0.0001,
+ }
+ LowQuantiles = []float64{0.01, 0.1, 0.5}
+ HighQuantiles = []float64{0.99, 0.9, 0.5}
+)
+
+const RelativeEpsilon = 0.01
+
+func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
+ sort.Float64s(a)
+ for quantile, epsilon := range Targets {
+ n := float64(len(a))
+ k := int(quantile * n)
+ if k < 1 {
+ k = 1
+ }
+ lower := int((quantile - epsilon) * n)
+ if lower < 1 {
+ lower = 1
+ }
+ upper := int(math.Ceil((quantile + epsilon) * n))
+ if upper > len(a) {
+ upper = len(a)
+ }
+ w, min, max := a[k-1], a[lower-1], a[upper-1]
+ if g := s.Query(quantile); g < min || g > max {
+ t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
+ }
+ }
+}
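+
+// For a sense of scale (taking n = 1e5 for round numbers): for quantile
+// 0.50 with epsilon 0.05, the accepted answers above are the sorted values
+// at ranks 45000 through 55000.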
+
+func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
+ sort.Float64s(a)
+ for _, qu := range LowQuantiles {
+ n := float64(len(a))
+ k := int(qu * n)
+
+ lowerRank := int((1 - RelativeEpsilon) * qu * n)
+ upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
+ w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
+ if g := s.Query(qu); g < min || g > max {
+ t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
+ }
+ }
+}
+
+func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
+ sort.Float64s(a)
+ for _, qu := range HighQuantiles {
+ n := float64(len(a))
+ k := int(qu * n)
+
+ lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
+ upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
+ w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
+ if g := s.Query(qu); g < min || g > max {
+ t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
+ }
+ }
+}
+
+func populateStream(s *Stream) []float64 {
+ a := make([]float64, 0, 1e5+100)
+ for i := 0; i < cap(a); i++ {
+ v := rand.NormFloat64()
+ // Add 5% asymmetric outliers.
+ if i%20 == 0 {
+ v = v*v + 1
+ }
+ s.Insert(v)
+ a = append(a, v)
+ }
+ return a
+}
+
+func TestTargetedQuery(t *testing.T) {
+ rand.Seed(42)
+ s := NewTargeted(Targets)
+ a := populateStream(s)
+ verifyPercsWithAbsoluteEpsilon(t, a, s)
+}
+
+func TestTargetedQuerySmallSampleSize(t *testing.T) {
+ rand.Seed(42)
+ s := NewTargeted(TargetsSmallEpsilon)
+ a := []float64{1, 2, 3, 4, 5}
+ for _, v := range a {
+ s.Insert(v)
+ }
+ verifyPercsWithAbsoluteEpsilon(t, a, s)
+ // If not yet flushed, results should be precise:
+ if !s.flushed() {
+ for φ, want := range map[float64]float64{
+ 0.01: 1,
+ 0.10: 1,
+ 0.50: 3,
+ 0.90: 5,
+ 0.99: 5,
+ } {
+ if got := s.Query(φ); got != want {
+ t.Errorf("want %f for φ=%f, got %f", want, φ, got)
+ }
+ }
+ }
+}
+
+func TestLowBiasedQuery(t *testing.T) {
+ rand.Seed(42)
+ s := NewLowBiased(RelativeEpsilon)
+ a := populateStream(s)
+ verifyLowPercsWithRelativeEpsilon(t, a, s)
+}
+
+func TestHighBiasedQuery(t *testing.T) {
+ rand.Seed(42)
+ s := NewHighBiased(RelativeEpsilon)
+ a := populateStream(s)
+ verifyHighPercsWithRelativeEpsilon(t, a, s)
+}
+
+// BrokenTestTargetedMerge is broken, see Merge doc comment.
+func BrokenTestTargetedMerge(t *testing.T) {
+ rand.Seed(42)
+ s1 := NewTargeted(Targets)
+ s2 := NewTargeted(Targets)
+ a := populateStream(s1)
+ a = append(a, populateStream(s2)...)
+ s1.Merge(s2.Samples())
+ verifyPercsWithAbsoluteEpsilon(t, a, s1)
+}
+
+// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
+func BrokenTestLowBiasedMerge(t *testing.T) {
+ rand.Seed(42)
+ s1 := NewLowBiased(RelativeEpsilon)
+ s2 := NewLowBiased(RelativeEpsilon)
+ a := populateStream(s1)
+ a = append(a, populateStream(s2)...)
+ s1.Merge(s2.Samples())
+ verifyLowPercsWithRelativeEpsilon(t, a, s2)
+}
+
+// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
+func BrokenTestHighBiasedMerge(t *testing.T) {
+ rand.Seed(42)
+ s1 := NewHighBiased(RelativeEpsilon)
+ s2 := NewHighBiased(RelativeEpsilon)
+ a := populateStream(s1)
+ a = append(a, populateStream(s2)...)
+ s1.Merge(s2.Samples())
+ verifyHighPercsWithRelativeEpsilon(t, a, s2)
+}
+
+func TestUncompressed(t *testing.T) {
+ q := NewTargeted(Targets)
+ for i := 100; i > 0; i-- {
+ q.Insert(float64(i))
+ }
+ if g := q.Count(); g != 100 {
+ t.Errorf("want count 100, got %d", g)
+ }
+ // Before compression, Query should have 100% accuracy.
+ for quantile := range Targets {
+ w := quantile * 100
+ if g := q.Query(quantile); g != w {
+ t.Errorf("want %f, got %f", w, g)
+ }
+ }
+}
+
+func TestUncompressedSamples(t *testing.T) {
+ q := NewTargeted(map[float64]float64{0.99: 0.001})
+ for i := 1; i <= 100; i++ {
+ q.Insert(float64(i))
+ }
+ if g := q.Samples().Len(); g != 100 {
+ t.Errorf("want count 100, got %d", g)
+ }
+}
+
+func TestUncompressedOne(t *testing.T) {
+ q := NewTargeted(map[float64]float64{0.99: 0.01})
+ q.Insert(3.14)
+ if g := q.Query(0.90); g != 3.14 {
+ t.Error("want PI, got", g)
+ }
+}
+
+func TestDefaults(t *testing.T) {
+ if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
+ t.Errorf("want 0, got %f", g)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/Makefile b/vendor/src/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 00000000..e2e0651a
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/src/github.com/golang/protobuf/proto/all_test.go b/vendor/src/github.com/golang/protobuf/proto/all_test.go
new file mode 100644
index 00000000..41451a40
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/all_test.go
@@ -0,0 +1,2278 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/golang/protobuf/proto"
+ . "github.com/golang/protobuf/proto/testdata"
+)
+
+var globalO *Buffer
+
+func old() *Buffer {
+ if globalO == nil {
+ globalO = NewBuffer(nil)
+ }
+ globalO.Reset()
+ return globalO
+}
+
+func equalbytes(b1, b2 []byte, t *testing.T) {
+ if len(b1) != len(b2) {
+ t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2))
+ return
+ }
+ for i := 0; i < len(b1); i++ {
+ if b1[i] != b2[i] {
+ t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
+ }
+ }
+}
+
+func initGoTestField() *GoTestField {
+ f := new(GoTestField)
+ f.Label = String("label")
+ f.Type = String("type")
+ return f
+}
+
+// These are all structurally equivalent but the tag numbers differ.
+// (It's remarkable that required, optional, and repeated all have
+// 8 letters.)
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
+ return &GoTest_RequiredGroup{
+ RequiredField: String("required"),
+ }
+}
+
+func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
+ return &GoTest_OptionalGroup{
+ RequiredField: String("optional"),
+ }
+}
+
+func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
+ return &GoTest_RepeatedGroup{
+ RequiredField: String("repeated"),
+ }
+}
+
+func initGoTest(setdefaults bool) *GoTest {
+ pb := new(GoTest)
+ if setdefaults {
+ pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
+ pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
+ pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
+ pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
+ pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
+ pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
+ pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
+ pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
+ pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
+ pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
+ pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
+ pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
+ pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
+ }
+
+ pb.Kind = GoTest_TIME.Enum()
+ pb.RequiredField = initGoTestField()
+ pb.F_BoolRequired = Bool(true)
+ pb.F_Int32Required = Int32(3)
+ pb.F_Int64Required = Int64(6)
+ pb.F_Fixed32Required = Uint32(32)
+ pb.F_Fixed64Required = Uint64(64)
+ pb.F_Uint32Required = Uint32(3232)
+ pb.F_Uint64Required = Uint64(6464)
+ pb.F_FloatRequired = Float32(3232)
+ pb.F_DoubleRequired = Float64(6464)
+ pb.F_StringRequired = String("string")
+ pb.F_BytesRequired = []byte("bytes")
+ pb.F_Sint32Required = Int32(-32)
+ pb.F_Sint64Required = Int64(-64)
+ pb.Requiredgroup = initGoTest_RequiredGroup()
+
+ return pb
+}
+
+func fail(msg string, b *bytes.Buffer, s string, t *testing.T) {
+ data := b.Bytes()
+ ld := len(data)
+ ls := len(s) / 2
+
+ fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls)
+
+ // find the interesting spot - n
+ n := ls
+ if ld < ls {
+ n = ld
+ }
+ j := 0
+ for i := 0; i < n; i++ {
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ if data[i] == bs {
+ continue
+ }
+ n = i
+ break
+ }
+ l := n - 10
+ if l < 0 {
+ l = 0
+ }
+ h := n + 10
+
+ // print the region around the interesting spot
+ fmt.Printf("is[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ld {
+ fmt.Printf(" --")
+ continue
+ }
+ fmt.Printf(" %.2x", data[i])
+ }
+ fmt.Printf("\n")
+
+ fmt.Printf("sb[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ls {
+ fmt.Printf(" --")
+ continue
+ }
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ fmt.Printf(" %.2x", bs)
+ }
+ fmt.Printf("\n")
+
+ t.Fail()
+
+ // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes())
+ // Print the output in a partially-decoded format; can
+ // be helpful when updating the test. It produces the output
+ // that is pasted, with minor edits, into the argument to verify().
+ // data := b.Bytes()
+ // nesting := 0
+ // for b.Len() > 0 {
+ // start := len(data) - b.Len()
+ // var u uint64
+ // u, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // wire := u & 0x7
+ // tag := u >> 3
+ // switch wire {
+ // case WireVarint:
+ // v, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed32:
+ // v, err := DecodeFixed32(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed32:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed64:
+ // v, err := DecodeFixed64(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed64:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireBytes:
+ // nb, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // after_tag := len(data) - b.Len()
+ // str := make([]byte, nb)
+ // _, err = b.Read(str)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n",
+ // data[start:after_tag], str, tag, wire)
+ // case WireStartGroup:
+ // nesting++
+ // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // case WireEndGroup:
+ // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // nesting--
+ // default:
+ // fmt.Printf("unrecognized wire type %d\n", wire)
+ // return
+ // }
+ // }
+}
+
+func hex(c uint8) uint8 {
+ if '0' <= c && c <= '9' {
+ return c - '0'
+ }
+ if 'a' <= c && c <= 'f' {
+ return 10 + c - 'a'
+ }
+ if 'A' <= c && c <= 'F' {
+ return 10 + c - 'A'
+ }
+ return 0
+}
+
+func equal(b []byte, s string, t *testing.T) bool {
+ if 2*len(b) != len(s) {
+ // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
+ fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
+ return false
+ }
+ for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
+ x := hex(s[j])*16 + hex(s[j+1])
+ if b[i] != x {
+ // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
+ fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
+ return false
+ }
+ }
+ return true
+}
+
+func overify(t *testing.T, pb *GoTest, expected string) {
+ o := old()
+ err := o.Marshal(pb)
+ if err != nil {
+ fmt.Printf("overify marshal-1 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 1", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = o.Unmarshal(pbd)
+ if err != nil {
+ t.Fatalf("overify unmarshal err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ o.Reset()
+ err = o.Marshal(pbd)
+ if err != nil {
+ t.Errorf("overify marshal-2 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 2", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+// Simple tests for numeric encode/decode primitives (varint, etc.)
+func TestNumericPrimitives(t *testing.T) {
+ for i := uint64(0); i < 1e6; i += 111 {
+ o := old()
+ if o.EncodeVarint(i) != nil {
+ t.Error("EncodeVarint")
+ break
+ }
+ x, e := o.DecodeVarint()
+ if e != nil {
+ t.Fatal("DecodeVarint")
+ }
+ if x != i {
+ t.Fatal("varint decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed32(i) != nil {
+ t.Fatal("encFixed32")
+ }
+ x, e = o.DecodeFixed32()
+ if e != nil {
+ t.Fatal("decFixed32")
+ }
+ if x != i {
+ t.Fatal("fixed32 decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed64(i*1234567) != nil {
+ t.Error("encFixed64")
+ break
+ }
+ x, e = o.DecodeFixed64()
+ if e != nil {
+ t.Error("decFixed64")
+ break
+ }
+ if x != i*1234567 {
+ t.Error("fixed64 decode fail:", i*1234567, x)
+ break
+ }
+
+ o = old()
+ i32 := int32(i - 12345)
+ if o.EncodeZigzag32(uint64(i32)) != nil {
+ t.Fatal("EncodeZigzag32")
+ }
+ x, e = o.DecodeZigzag32()
+ if e != nil {
+ t.Fatal("DecodeZigzag32")
+ }
+ if x != uint64(uint32(i32)) {
+ t.Fatal("zigzag32 decode fail:", i32, x)
+ }
+
+ o = old()
+ i64 := int64(i - 12345)
+ if o.EncodeZigzag64(uint64(i64)) != nil {
+ t.Fatal("EncodeZigzag64")
+ }
+ x, e = o.DecodeZigzag64()
+ if e != nil {
+ t.Fatal("DecodeZigzag64")
+ }
+ if x != uint64(i64) {
+ t.Fatal("zigzag64 decode fail:", i64, x)
+ }
+ }
+}
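+
+// A worked zigzag example: zigzag32 maps n to (n << 1) ^ (n >> 31), so -32
+// encodes as 63 (0x3f) and, under the 64-bit variant, -64 encodes as 127
+// (0x7f); these are the "3f" and "7f" bytes recurring in the hex strings in
+// the tests below.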
+
+// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.
+type fakeMarshaler struct {
+ b []byte
+ err error
+}
+
+func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }
+func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) }
+func (f *fakeMarshaler) ProtoMessage() {}
+func (f *fakeMarshaler) Reset() {}
+
+type msgWithFakeMarshaler struct {
+ M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"`
+}
+
+func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }
+func (m *msgWithFakeMarshaler) ProtoMessage() {}
+func (m *msgWithFakeMarshaler) Reset() {}
+
+// Simple tests for proto messages that implement the Marshaler interface.
+func TestMarshalerEncoding(t *testing.T) {
+ tests := []struct {
+ name string
+ m Message
+ want []byte
+ errType reflect.Type
+ }{
+ {
+ name: "Marshaler that fails",
+ m: &fakeMarshaler{
+ err: errors.New("some marshal err"),
+ b: []byte{5, 6, 7},
+ },
+ // Since the Marshal method returned bytes, they should be written to the
+ // buffer. (For efficiency, we assume that Marshal implementations are
+ // always correct w.r.t. RequiredNotSetError and output.)
+ want: []byte{5, 6, 7},
+ errType: reflect.TypeOf(errors.New("some marshal err")),
+ },
+ {
+ name: "Marshaler that fails with RequiredNotSetError",
+ m: &msgWithFakeMarshaler{
+ M: &fakeMarshaler{
+ err: &RequiredNotSetError{},
+ b: []byte{5, 6, 7},
+ },
+ },
+ // Since there's an error that can be continued after,
+ // the buffer should be written.
+ want: []byte{
+ 10, 3, // for &msgWithFakeMarshaler
+ 5, 6, 7, // for &fakeMarshaler
+ },
+ errType: reflect.TypeOf(&RequiredNotSetError{}),
+ },
+ {
+ name: "Marshaler that succeeds",
+ m: &fakeMarshaler{
+ b: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ want: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ }
+ for _, test := range tests {
+ b := NewBuffer(nil)
+ err := b.Marshal(test.m)
+ if reflect.TypeOf(err) != test.errType {
+ t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType)
+ }
+ if !reflect.DeepEqual(test.want, b.Bytes()) {
+ t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
+ }
+ if size := Size(test.m); size != len(b.Bytes()) {
+ t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes()))
+ }
+
+ m, mErr := Marshal(test.m)
+ if !bytes.Equal(b.Bytes(), m) {
+ t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes())
+ }
+ if !reflect.DeepEqual(err, mErr) {
+ t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q",
+ test.name, fmt.Sprint(mErr), fmt.Sprint(err))
+ }
+ }
+}
+
+// Simple tests for bytes
+func TestBytesPrimitives(t *testing.T) {
+ o := old()
+ bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
+ if o.EncodeRawBytes(bytes) != nil {
+ t.Error("EncodeRawBytes")
+ }
+ decb, e := o.DecodeRawBytes(false)
+ if e != nil {
+ t.Error("DecodeRawBytes")
+ }
+ equalbytes(bytes, decb, t)
+}
+
+// Simple tests for strings
+func TestStringPrimitives(t *testing.T) {
+ o := old()
+ s := "now is the time"
+ if o.EncodeStringBytes(s) != nil {
+ t.Error("enc_string")
+ }
+ decs, e := o.DecodeStringBytes()
+ if e != nil {
+ t.Error("dec_string")
+ }
+ if s != decs {
+ t.Error("string encode/decode fail:", s, decs)
+ }
+}
+
+// Do we catch the "required bit not set" case?
+func TestRequiredBit(t *testing.T) {
+ o := old()
+ pb := new(GoTest)
+ err := o.Marshal(pb)
+ if err == nil {
+ t.Error("did not catch missing required fields")
+ } else if !strings.Contains(err.Error(), "Kind") {
+ t.Error("wrong error type:", err)
+ }
+}
+
+// Check that all fields are nil.
+// Clearly silly, and a residue from a more interesting test with an earlier,
+// different initialization property, but it once caught a compiler bug so
+// it lives.
+func checkInitialized(pb *GoTest, t *testing.T) {
+ if pb.F_BoolDefaulted != nil {
+ t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
+ }
+ if pb.F_Int32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
+ }
+ if pb.F_Int64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
+ }
+ if pb.F_Fixed32Defaulted != nil {
+ t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
+ }
+ if pb.F_Fixed64Defaulted != nil {
+ t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
+ }
+ if pb.F_Uint32Defaulted != nil {
+ t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
+ }
+ if pb.F_Uint64Defaulted != nil {
+ t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
+ }
+ if pb.F_FloatDefaulted != nil {
+ t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
+ }
+ if pb.F_DoubleDefaulted != nil {
+ t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
+ }
+ if pb.F_StringDefaulted != nil {
+ t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
+ }
+ if pb.F_BytesDefaulted != nil {
+ t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
+ }
+ if pb.F_Sint32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
+ }
+ if pb.F_Sint64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
+ }
+}
+
+// Does Reset() reset?
+func TestReset(t *testing.T) {
+ pb := initGoTest(true)
+ // muck with some values
+ pb.F_BoolDefaulted = Bool(false)
+ pb.F_Int32Defaulted = Int32(237)
+ pb.F_Int64Defaulted = Int64(12346)
+ pb.F_Fixed32Defaulted = Uint32(32000)
+ pb.F_Fixed64Defaulted = Uint64(666)
+ pb.F_Uint32Defaulted = Uint32(323232)
+ pb.F_Uint64Defaulted = nil
+ pb.F_FloatDefaulted = nil
+ pb.F_DoubleDefaulted = Float64(0)
+ pb.F_StringDefaulted = String("gotcha")
+ pb.F_BytesDefaulted = []byte("asdfasdf")
+ pb.F_Sint32Defaulted = Int32(123)
+ pb.F_Sint64Defaulted = Int64(789)
+ pb.Reset()
+ checkInitialized(pb, t)
+}
+
+// All required fields set, no defaults provided.
+func TestEncodeDecode1(t *testing.T) {
+ pb := initGoTest(false)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 0x20
+ "714000000000000000"+ // field 14, encoding 1, value 0x40
+ "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
+ "b304"+ // field 70, encoding 3, start group
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // field 70, encoding 4, end group
+ "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f") // field 103, encoding 0, 0x7f zigzag64
+}
+
+// All required fields set, defaults provided.
+func TestEncodeDecode2(t *testing.T) {
+ pb := initGoTest(true)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All default fields set to their default value by hand
+func TestEncodeDecode3(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolDefaulted = Bool(true)
+ pb.F_Int32Defaulted = Int32(32)
+ pb.F_Int64Defaulted = Int64(64)
+ pb.F_Fixed32Defaulted = Uint32(320)
+ pb.F_Fixed64Defaulted = Uint64(640)
+ pb.F_Uint32Defaulted = Uint32(3200)
+ pb.F_Uint64Defaulted = Uint64(6400)
+ pb.F_FloatDefaulted = Float32(314159)
+ pb.F_DoubleDefaulted = Float64(271828)
+ pb.F_StringDefaulted = String("hello, \"world!\"\n")
+ pb.F_BytesDefaulted = []byte("Bignose")
+ pb.F_Sint32Defaulted = Int32(-32)
+ pb.F_Sint64Defaulted = Int64(-64)
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all non-defaulted optional fields have values.
+func TestEncodeDecode4(t *testing.T) {
+ pb := initGoTest(true)
+ pb.Table = String("hello")
+ pb.Param = Int32(7)
+ pb.OptionalField = initGoTestField()
+ pb.F_BoolOptional = Bool(true)
+ pb.F_Int32Optional = Int32(32)
+ pb.F_Int64Optional = Int64(64)
+ pb.F_Fixed32Optional = Uint32(3232)
+ pb.F_Fixed64Optional = Uint64(6464)
+ pb.F_Uint32Optional = Uint32(323232)
+ pb.F_Uint64Optional = Uint64(646464)
+ pb.F_FloatOptional = Float32(32.)
+ pb.F_DoubleOptional = Float64(64.)
+ pb.F_StringOptional = String("hello")
+ pb.F_BytesOptional = []byte("Bignose")
+ pb.F_Sint32Optional = Int32(-32)
+ pb.F_Sint64Optional = Int64(-64)
+ pb.Optionalgroup = initGoTest_OptionalGroup()
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
+ "1807"+ // field 3, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "f00101"+ // field 30, encoding 0, value 1
+ "f80120"+ // field 31, encoding 0, value 32
+ "800240"+ // field 32, encoding 0, value 64
+ "8d02a00c0000"+ // field 33, encoding 5, value 3232
+ "91024019000000000000"+ // field 34, encoding 1, value 6464
+ "9802a0dd13"+ // field 35, encoding 0, value 323232
+ "a002c0ba27"+ // field 36, encoding 0, value 646464
+ "ad0200000042"+ // field 37, encoding 5, value 32.0
+ "b1020000000000005040"+ // field 38, encoding 1, value 64.0
+ "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "d305"+ // start group field 90 level 1
+ "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
+ "d405"+ // end group field 90 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
+ "f0123f"+ // field 302, encoding 0, value 63
+ "f8127f"+ // field 303, encoding 0, value 127
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+func TestEncodeDecode5(t *testing.T) {
+ pb := initGoTest(true)
+ pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
+ pb.F_BoolRepeated = []bool{false, true}
+ pb.F_Int32Repeated = []int32{32, 33}
+ pb.F_Int64Repeated = []int64{64, 65}
+ pb.F_Fixed32Repeated = []uint32{3232, 3333}
+ pb.F_Fixed64Repeated = []uint64{6464, 6565}
+ pb.F_Uint32Repeated = []uint32{323232, 333333}
+ pb.F_Uint64Repeated = []uint64{646464, 656565}
+ pb.F_FloatRepeated = []float32{32., 33.}
+ pb.F_DoubleRepeated = []float64{64., 65.}
+ pb.F_StringRepeated = []string{"hello", "sailor"}
+ pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
+ pb.F_Sint32Repeated = []int32{32, -32}
+ pb.F_Sint64Repeated = []int64{64, -64}
+ pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "a00100"+ // field 20, encoding 0, value 0
+ "a00101"+ // field 20, encoding 0, value 1
+ "a80120"+ // field 21, encoding 0, value 32
+ "a80121"+ // field 21, encoding 0, value 33
+ "b00140"+ // field 22, encoding 0, value 64
+ "b00141"+ // field 22, encoding 0, value 65
+ "bd01a00c0000"+ // field 23, encoding 5, value 3232
+ "bd01050d0000"+ // field 23, encoding 5, value 3333
+ "c1014019000000000000"+ // field 24, encoding 1, value 6464
+ "c101a519000000000000"+ // field 24, encoding 1, value 6565
+ "c801a0dd13"+ // field 25, encoding 0, value 323232
+ "c80195ac14"+ // field 25, encoding 0, value 333333
+ "d001c0ba27"+ // field 26, encoding 0, value 646464
+ "d001b58928"+ // field 26, encoding 0, value 656565
+ "dd0100000042"+ // field 27, encoding 5, value 32.0
+ "dd0100000442"+ // field 27, encoding 5, value 33.0
+ "e1010000000000005040"+ // field 28, encoding 1, value 64.0
+ "e1010000000000405040"+ // field 28, encoding 1, value 65.0
+ "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
+ "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
+ "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
+ "d00c40"+ // field 202, encoding 0, value 32
+ "d00c3f"+ // field 202, encoding 0, value -32
+ "d80c8001"+ // field 203, encoding 0, value 64
+ "d80c7f"+ // field 203, encoding 0, value -64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, all packed repeated fields given two values.
+func TestEncodeDecode6(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolRepeatedPacked = []bool{false, true}
+ pb.F_Int32RepeatedPacked = []int32{32, 33}
+ pb.F_Int64RepeatedPacked = []int64{64, 65}
+ pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
+ pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
+ pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
+ pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
+ pb.F_FloatRepeatedPacked = []float32{32., 33.}
+ pb.F_DoubleRepeatedPacked = []float64{64., 65.}
+ pb.F_Sint32RepeatedPacked = []int32{32, -32}
+ pb.F_Sint64RepeatedPacked = []int64{64, -64}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
+ "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
+ "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
+ "aa0308"+ // field 53, encoding 2, 8 bytes
+ "a00c0000050d0000"+ // value 3232, value 3333
+ "b20310"+ // field 54, encoding 2, 16 bytes
+ "4019000000000000a519000000000000"+ // value 6464, value 6565
+ "ba0306"+ // field 55, encoding 2, 6 bytes
+ "a0dd1395ac14"+ // value 323232, value 333333
+ "c20306"+ // field 56, encoding 2, 6 bytes
+ "c0ba27b58928"+ // value 646464, value 656565
+ "ca0308"+ // field 57, encoding 2, 8 bytes
+ "0000004200000442"+ // value 32.0, value 33.0
+ "d20310"+ // field 58, encoding 2, 16 bytes
+ "00000000000050400000000000405040"+ // value 64.0, value 65.0
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "b21f02"+ // field 502, encoding 2, 2 bytes
+ "403f"+ // value 32, value -32
+ "ba1f03"+ // field 503, encoding 2, 3 bytes
+ "80017f") // value 64, value -64
+}
+
+// Test that we can encode empty bytes fields.
+func TestEncodeDecodeBytes1(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRequired = []byte{}
+ pb.F_BytesRepeated = [][]byte{{}}
+ pb.F_BytesOptional = []byte{}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
+ t.Error("required empty bytes field is incorrect")
+ }
+ if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
+ t.Error("repeated empty bytes field is incorrect")
+ }
+ if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
+ t.Error("optional empty bytes field is incorrect")
+ }
+}
+
+// Test that we encode nil-valued fields of a repeated bytes field correctly.
+// Since entries in a repeated field cannot be nil, nil must mean empty value.
+func TestEncodeDecodeBytes2(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRepeated = [][]byte{nil}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("Unexpected value for repeated bytes field")
+ }
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+func TestSkippingUnrecognizedFields(t *testing.T) {
+ o := old()
+ pb := initGoTestField()
+
+ // Marshal it normally.
+ o.Marshal(pb)
+
+ // Now new a GoSkipTest record.
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ // Marshal it into same buffer.
+ o.Marshal(skip)
+
+ pbd := new(GoTestField)
+ o.Unmarshal(pbd)
+
+ // The XXX_unrecognized field should be a marshaling of GoSkipTest
+ skipd := new(GoSkipTest)
+
+ o.SetBuf(pbd.XXX_unrecognized)
+ o.Unmarshal(skipd)
+
+ if *skipd.SkipInt32 != *skip.SkipInt32 {
+ t.Error("skip int32", skipd.SkipInt32)
+ }
+ if *skipd.SkipFixed32 != *skip.SkipFixed32 {
+ t.Error("skip fixed32", skipd.SkipFixed32)
+ }
+ if *skipd.SkipFixed64 != *skip.SkipFixed64 {
+ t.Error("skip fixed64", skipd.SkipFixed64)
+ }
+ if *skipd.SkipString != *skip.SkipString {
+ t.Error("skip string", *skipd.SkipString)
+ }
+ if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
+ t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
+ }
+ if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
+ t.Error("skip group string", *skipd.Skipgroup.GroupString)
+ }
+}
+
+// Check that unrecognized fields of a submessage are preserved.
+func TestSubmessageUnrecognizedFields(t *testing.T) {
+ nm := &NewMessage{
+ Nested: &NewMessage_Nested{
+ Name: String("Nigel"),
+ FoodGroup: String("carbs"),
+ },
+ }
+ b, err := Marshal(nm)
+ if err != nil {
+ t.Fatalf("Marshal of NewMessage: %v", err)
+ }
+
+ // Unmarshal into an OldMessage.
+ om := new(OldMessage)
+ if err := Unmarshal(b, om); err != nil {
+ t.Fatalf("Unmarshal to OldMessage: %v", err)
+ }
+ exp := &OldMessage{
+ Nested: &OldMessage_Nested{
+ Name: String("Nigel"),
+ // normal protocol buffer users should not do this
+ XXX_unrecognized: []byte("\x12\x05carbs"),
+ },
+ }
+ if !Equal(om, exp) {
+ t.Errorf("om = %v, want %v", om, exp)
+ }
+
+ // Clone the OldMessage.
+ om = Clone(om).(*OldMessage)
+ if !Equal(om, exp) {
+ t.Errorf("Clone(om) = %v, want %v", om, exp)
+ }
+
+ // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
+ if b, err = Marshal(om); err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+ t.Logf("Marshal(%v) -> %q", om, b)
+ nm2 := new(NewMessage)
+ if err := Unmarshal(b, nm2); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ if !Equal(nm, nm2) {
+ t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
+ }
+}
+
+// Check that an int32 field can be upgraded to an int64 field.
+func TestNegativeInt32(t *testing.T) {
+ om := &OldMessage{
+ Num: Int32(-1),
+ }
+ b, err := Marshal(om)
+ if err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+
+ // Check the size. It should be 11 bytes;
+ // 1 for the field/wire type, and 10 for the negative number.
+ if len(b) != 11 {
+ t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
+ }
+
+ // Unmarshal into a NewMessage.
+ nm := new(NewMessage)
+ if err := Unmarshal(b, nm); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ want := &NewMessage{
+ Num: Int64(-1),
+ }
+ if !Equal(nm, want) {
+ t.Errorf("nm = %v, want %v", nm, want)
+ }
+}
+
+// Check that we can grow an array (repeated field) to have many elements.
+// This test doesn't depend only on our encoding; for variety, it makes sure
+// we create, encode, and decode the correct contents explicitly. It's therefore
+// a bit messier.
+// This test also uses (and hence tests) the Marshal/Unmarshal functions
+// instead of the methods.
+func TestBigRepeated(t *testing.T) {
+ pb := initGoTest(true)
+
+ // Create the arrays
+ const N = 50 // Internally the library starts much smaller.
+ pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
+ pb.F_Sint64Repeated = make([]int64, N)
+ pb.F_Sint32Repeated = make([]int32, N)
+ pb.F_BytesRepeated = make([][]byte, N)
+ pb.F_StringRepeated = make([]string, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ pb.F_FloatRepeated = make([]float32, N)
+ pb.F_Uint64Repeated = make([]uint64, N)
+ pb.F_Uint32Repeated = make([]uint32, N)
+ pb.F_Fixed64Repeated = make([]uint64, N)
+ pb.F_Fixed32Repeated = make([]uint32, N)
+ pb.F_Int64Repeated = make([]int64, N)
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_BoolRepeated = make([]bool, N)
+ pb.RepeatedField = make([]*GoTestField, N)
+
+ // Fill in the arrays with checkable values.
+ igtf := initGoTestField()
+ igtrg := initGoTest_RepeatedGroup()
+ for i := 0; i < N; i++ {
+ pb.Repeatedgroup[i] = igtrg
+ pb.F_Sint64Repeated[i] = int64(i)
+ pb.F_Sint32Repeated[i] = int32(i)
+ s := fmt.Sprint(i)
+ pb.F_BytesRepeated[i] = []byte(s)
+ pb.F_StringRepeated[i] = s
+ pb.F_DoubleRepeated[i] = float64(i)
+ pb.F_FloatRepeated[i] = float32(i)
+ pb.F_Uint64Repeated[i] = uint64(i)
+ pb.F_Uint32Repeated[i] = uint32(i)
+ pb.F_Fixed64Repeated[i] = uint64(i)
+ pb.F_Fixed32Repeated[i] = uint32(i)
+ pb.F_Int64Repeated[i] = int64(i)
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_BoolRepeated[i] = i%2 == 0
+ pb.RepeatedField[i] = igtf
+ }
+
+ // Marshal.
+ buf, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ if err := Unmarshal(buf, pbd); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+
+ // Check the checkable values
+ for i := uint64(0); i < N; i++ {
+ if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+ t.Error("pbd.Repeatedgroup bad")
+ }
+ var x uint64
+ x = uint64(pbd.F_Sint64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Sint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint32Repeated bad", x, i)
+ }
+ s := fmt.Sprint(i)
+ equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+ if pbd.F_StringRepeated[i] != s {
+ t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
+ }
+ x = uint64(pbd.F_DoubleRepeated[i])
+ if x != i {
+ t.Error("pbd.F_DoubleRepeated bad", x, i)
+ }
+ x = uint64(pbd.F_FloatRepeated[i])
+ if x != i {
+ t.Error("pbd.F_FloatRepeated bad", x, i)
+ }
+ x = pbd.F_Uint64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Uint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Uint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Uint32Repeated bad", x, i)
+ }
+ x = pbd.F_Fixed64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Fixed64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Fixed32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Fixed32Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int32Repeated bad", x, i)
+ }
+ if pbd.F_BoolRepeated[i] != (i%2 == 0) {
+ t.Error("pbd.F_BoolRepeated bad", x, i)
+ }
+ if pbd.RepeatedField[i] == nil { // TODO: more checking?
+ t.Error("pbd.RepeatedField bad")
+ }
+ }
+}
+
+// Verify we give a useful message when decoding to the wrong structure type.
+func TestTypeMismatch(t *testing.T) {
+ pb1 := initGoTest(true)
+
+ // Marshal
+ o := old()
+ o.Marshal(pb1)
+
+ // Now Unmarshal it to the wrong type.
+ pb2 := initGoTestField()
+ err := o.Unmarshal(pb2)
+ if err == nil {
+ t.Error("expected error, got no error")
+ } else if !strings.Contains(err.Error(), "bad wiretype") {
+ t.Error("expected bad wiretype error, got", err)
+ }
+}
+
+func encodeDecode(t *testing.T, in, out Message, msg string) {
+ buf, err := Marshal(in)
+ if err != nil {
+ t.Fatalf("failed marshaling %v: %v", msg, err)
+ }
+ if err := Unmarshal(buf, out); err != nil {
+ t.Fatalf("failed unmarshaling %v: %v", msg, err)
+ }
+}
+
+func TestPackedNonPackedDecoderSwitching(t *testing.T) {
+ np, p := new(NonPackedTest), new(PackedTest)
+
+ // non-packed -> packed
+ np.A = []int32{0, 1, 1, 2, 3, 5}
+ encodeDecode(t, np, p, "non-packed -> packed")
+ if !reflect.DeepEqual(np.A, p.B) {
+ t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
+ }
+
+ // packed -> non-packed
+ np.Reset()
+ p.B = []int32{3, 1, 4, 1, 5, 9}
+ encodeDecode(t, p, np, "packed -> non-packed")
+ if !reflect.DeepEqual(p.B, np.A) {
+ t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
+ }
+}
+
+func TestProto1RepeatedGroup(t *testing.T) {
+ pb := &MessageList{
+ Message: []*MessageList_Message{
+ {
+ Name: String("blah"),
+ Count: Int32(7),
+ },
+ // NOTE: pb.Message[1] is a nil
+ nil,
+ },
+ }
+
+ o := old()
+ err := o.Marshal(pb)
+ if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
+ t.Fatalf("unexpected or no error when marshaling: %v", err)
+ }
+}
+
+// Test that enums work. Checks for a bug introduced by making enums
+// named types instead of int32: newInt32FromUint64 would crash with
+// a type mismatch in reflect.PtrTo.
+func TestEnum(t *testing.T) {
+ pb := new(GoEnum)
+ pb.Foo = FOO_FOO1.Enum()
+ o := old()
+ if err := o.Marshal(pb); err != nil {
+ t.Fatal("error encoding enum:", err)
+ }
+ pb1 := new(GoEnum)
+ if err := o.Unmarshal(pb1); err != nil {
+ t.Fatal("error decoding enum:", err)
+ }
+ if *pb1.Foo != FOO_FOO1 {
+ t.Error("expected 7 but got ", *pb1.Foo)
+ }
+}
+
+// Enum types have String methods. Check that enum fields can be printed.
+// We don't care what the value actually is, just as long as it doesn't crash.
+func TestPrintingNilEnumFields(t *testing.T) {
+ pb := new(GoEnum)
+ _ = fmt.Sprintf("%+v", pb)
+}
+
+// Verify that absent required fields cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcement(t *testing.T) {
+ pb := new(GoTestField)
+ _, err := Marshal(pb)
+ if err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
+ // so simply counting the required fields is insufficient.
+ // field 1, encoding 2, value "hi"
+ buf := []byte("\x0A\x02hi\x0A\x02hi")
+ err = Unmarshal(buf, pb)
+ if err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcementGroups(t *testing.T) {
+ pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}
+ if _, err := Marshal(pb); err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ buf := []byte{11, 12}
+ if err := Unmarshal(buf, pb); err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+func TestTypedNilMarshal(t *testing.T) {
+ // A typed nil should return ErrNil and not crash.
+ {
+ var m *GoEnum
+ if _, err := Marshal(m); err != ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err)
+ }
+ }
+
+ {
+ m := &Communique{Union: &Communique_Msg{nil}}
+ if _, err := Marshal(m); err == nil || err == ErrNil {
+ t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err)
+ }
+ }
+}
+
+// A type that implements the Marshaler interface, but is not nillable.
+type nonNillableInt uint64
+
+func (nni nonNillableInt) Marshal() ([]byte, error) {
+ return EncodeVarint(uint64(nni)), nil
+}
+
+type NNIMessage struct {
+ nni nonNillableInt
+}
+
+func (*NNIMessage) Reset() {}
+func (*NNIMessage) String() string { return "" }
+func (*NNIMessage) ProtoMessage() {}
+
+// A type that implements the Marshaler interface and is nillable.
+type nillableMessage struct {
+ x uint64
+}
+
+func (nm *nillableMessage) Marshal() ([]byte, error) {
+ return EncodeVarint(nm.x), nil
+}
+
+type NMMessage struct {
+ nm *nillableMessage
+}
+
+func (*NMMessage) Reset() {}
+func (*NMMessage) String() string { return "" }
+func (*NMMessage) ProtoMessage() {}
+
+// Verify a type that uses the Marshaler interface, but has a nil pointer.
+func TestNilMarshaler(t *testing.T) {
+ // Try a struct with a Marshaler field that is nil.
+ // It should be directly marshalable.
+ nmm := new(NMMessage)
+ if _, err := Marshal(nmm); err != nil {
+ t.Error("unexpected error marshaling nmm: ", err)
+ }
+
+ // Try a struct with a Marshaler field that is not nillable.
+ nnim := new(NNIMessage)
+ nnim.nni = 7
+ var _ Marshaler = nnim.nni // verify it is truly a Marshaler
+ if _, err := Marshal(nnim); err != nil {
+ t.Error("unexpected error marshaling nnim: ", err)
+ }
+}
+
+func TestAllSetDefaults(t *testing.T) {
+ // Exercise SetDefaults with all scalar field types.
+ m := &Defaults{
+ // NaN != NaN, so override that here.
+ F_Nan: Float32(1.7),
+ }
+ expected := &Defaults{
+ F_Bool: Bool(true),
+ F_Int32: Int32(32),
+ F_Int64: Int64(64),
+ F_Fixed32: Uint32(320),
+ F_Fixed64: Uint64(640),
+ F_Uint32: Uint32(3200),
+ F_Uint64: Uint64(6400),
+ F_Float: Float32(314159),
+ F_Double: Float64(271828),
+ F_String: String(`hello, "world!"` + "\n"),
+ F_Bytes: []byte("Bignose"),
+ F_Sint32: Int32(-32),
+ F_Sint64: Int64(-64),
+ F_Enum: Defaults_GREEN.Enum(),
+ F_Pinf: Float32(float32(math.Inf(1))),
+ F_Ninf: Float32(float32(math.Inf(-1))),
+ F_Nan: Float32(1.7),
+ StrZero: String(""),
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithSetField(t *testing.T) {
+ // Check that a set value is not overridden.
+ m := &Defaults{
+ F_Int32: Int32(12),
+ }
+ SetDefaults(m)
+ if v := m.GetF_Int32(); v != 12 {
+ t.Errorf("m.FInt32 = %v, want 12", v)
+ }
+}
+
+func TestSetDefaultsWithSubMessage(t *testing.T) {
+ m := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ },
+ }
+ expected := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ Port: Int32(4000),
+ },
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
+ m := &MyMessage{
+ RepInner: []*InnerMessage{{}},
+ }
+ expected := &MyMessage{
+ RepInner: []*InnerMessage{{
+ Port: Int32(4000),
+ }},
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
+ m := &MyMessage{
+ Pet: []string{"turtle", "wombat"},
+ }
+ expected := Clone(m)
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestMaximumTagNumber(t *testing.T) {
+ m := &MaxTag{
+ LastField: String("natural goat essence"),
+ }
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal failed: %v", err)
+ }
+ m2 := new(MaxTag)
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Fatalf("proto.Unmarshal failed: %v", err)
+ }
+ if got, want := m2.GetLastField(), *m.LastField; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestJSON(t *testing.T) {
+ m := &MyMessage{
+ Count: Int32(4),
+ Pet: []string{"bunny", "kitty"},
+ Inner: &InnerMessage{
+ Host: String("cauchy"),
+ },
+ Bikeshed: MyMessage_GREEN.Enum(),
+ }
+ const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
+
+ b, err := json.Marshal(m)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ s := string(b)
+ if s != expected {
+ t.Errorf("got %s\nwant %s", s, expected)
+ }
+
+ received := new(MyMessage)
+ if err := json.Unmarshal(b, received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+
+ // Test unmarshalling of JSON with symbolic enum name.
+ const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
+ received.Reset()
+ if err := json.Unmarshal([]byte(old), received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+}
+
+func TestBadWireType(t *testing.T) {
+ b := []byte{7<<3 | 6} // field 7, wire type 6
+ pb := new(OtherMessage)
+ if err := Unmarshal(b, pb); err == nil {
+ t.Errorf("Unmarshal did not fail")
+ } else if !strings.Contains(err.Error(), "unknown wire type") {
+ t.Errorf("wrong error: %v", err)
+ }
+}
+
+func TestBytesWithInvalidLength(t *testing.T) {
+ // If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
+ b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestLengthOverflow(t *testing.T) {
+ // Overflowing a length should not panic.
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestVarintOverflow(t *testing.T) {
+ // Overflowing a 64-bit length should not be allowed.
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
+ if err := Unmarshal(b, new(MyMessage)); err == nil {
+ t.Fatalf("Overflowed uint64 length without error")
+ }
+}
+
+func TestUnmarshalFuzz(t *testing.T) {
+ const N = 1000
+ seed := time.Now().UnixNano()
+ t.Logf("RNG seed is %d", seed)
+ rng := rand.New(rand.NewSource(seed))
+ buf := make([]byte, 20)
+ for i := 0; i < N; i++ {
+ for j := range buf {
+ buf[j] = byte(rng.Intn(256))
+ }
+ fuzzUnmarshal(t, buf)
+ }
+}
+
+func TestMergeMessages(t *testing.T) {
+ pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
+ data, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ pb1 := new(MessageList)
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("first Unmarshal: %v", err)
+ }
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("second Unmarshal: %v", err)
+ }
+ if len(pb1.Message) != 1 {
+ t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
+ }
+
+ pb2 := new(MessageList)
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("first UnmarshalMerge: %v", err)
+ }
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("second UnmarshalMerge: %v", err)
+ }
+ if len(pb2.Message) != 2 {
+ t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
+ }
+}
+
+func TestExtensionMarshalOrder(t *testing.T) {
+ m := &MyMessage{Count: Int(123)}
+ if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ var orig []byte
+ for i := 0; i < 100; i++ {
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if i == 0 {
+ orig = b
+ continue
+ }
+ if !bytes.Equal(b, orig) {
+ t.Errorf("Bytes differ on attempt #%d", i)
+ }
+ }
+}
+
+// Many extensions, because a small map is likely to iterate in the same
+// order on every run, which would hide nondeterministic-ordering bugs.
+var exts = []*ExtensionDesc{
+ E_X201,
+ E_X202,
+ E_X203,
+ E_X204,
+ E_X205,
+ E_X206,
+ E_X207,
+ E_X208,
+ E_X209,
+ E_X210,
+ E_X211,
+ E_X212,
+ E_X213,
+ E_X214,
+ E_X215,
+ E_X216,
+ E_X217,
+ E_X218,
+ E_X219,
+ E_X220,
+ E_X221,
+ E_X222,
+ E_X223,
+ E_X224,
+ E_X225,
+ E_X226,
+ E_X227,
+ E_X228,
+ E_X229,
+ E_X230,
+ E_X231,
+ E_X232,
+ E_X233,
+ E_X234,
+ E_X235,
+ E_X236,
+ E_X237,
+ E_X238,
+ E_X239,
+ E_X240,
+ E_X241,
+ E_X242,
+ E_X243,
+ E_X244,
+ E_X245,
+ E_X246,
+ E_X247,
+ E_X248,
+ E_X249,
+ E_X250,
+}
+
+func TestMessageSetMarshalOrder(t *testing.T) {
+ m := &MyMessageSet{}
+ for _, x := range exts {
+ if err := SetExtension(m, x, &Empty{}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ }
+
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ for i := 0; i < 10; i++ {
+ b1, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(b1, buf) {
+ t.Errorf("Bytes differ on re-Marshal #%d", i)
+ }
+
+ m2 := &MyMessageSet{}
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ b2, err := Marshal(m2)
+ if err != nil {
+ t.Errorf("re-Marshal: %v", err)
+ }
+ if !bytes.Equal(b2, buf) {
+ t.Errorf("Bytes differ on round-trip #%d", i)
+ }
+ }
+}
+
+func TestUnmarshalMergesMessages(t *testing.T) {
+ // If a nested message occurs twice in the input,
+ // the fields should be merged when decoding.
+ a := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("polhode"),
+ Port: Int32(1234),
+ },
+ }
+ aData, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal(a): %v", err)
+ }
+ b := &OtherMessage{
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Connected: Bool(true),
+ },
+ }
+ bData, err := Marshal(b)
+ if err != nil {
+ t.Fatalf("Marshal(b): %v", err)
+ }
+ want := &OtherMessage{
+ Key: Int64(123),
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Port: Int32(1234),
+ Connected: Bool(true),
+ },
+ }
+ got := new(OtherMessage)
+ if err := Unmarshal(append(aData, bData...), got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(got, want) {
+ t.Errorf("\n got %v\nwant %v", got, want)
+ }
+}
+
+func TestEncodingSizes(t *testing.T) {
+ tests := []struct {
+ m Message
+ n int
+ }{
+ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
+ {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
+ {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
+ {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
+ }
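+ // Why these sizes: int32 fields are sign-extended and varint-encoded,
+ // so math.MaxInt32 (0x7fffffff) takes 5 varint bytes plus a 1-byte tag,
+ // while math.MinInt32 has the top bit set after sign extension and
+ // takes the maximum 10 varint bytes plus the tag, giving 11.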
+ for _, test := range tests {
+ b, err := Marshal(test.m)
+ if err != nil {
+ t.Errorf("Marshal(%v): %v", test.m, err)
+ continue
+ }
+ if len(b) != test.n {
+ t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
+ }
+ }
+}
+
+func TestRequiredNotSetError(t *testing.T) {
+ pb := initGoTest(false)
+ pb.RequiredField.Label = nil
+ pb.F_Int32Required = nil
+ pb.F_Int64Required = nil
+
+ expected := "0807" + // field 1, encoding 0, value 7
+ "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
+ "5001" + // field 10, encoding 0, value 1
+ "6d20000000" + // field 13, encoding 5, value 0x20
+ "714000000000000000" + // field 14, encoding 1, value 0x40
+ "78a019" + // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45" + // field 17, encoding 5, value 3232.0
+ "9101000000000040b940" + // field 18, encoding 1, value 6464.0
+ "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
+ "b304" + // field 70, encoding 3, start group
+ "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
+ "b404" + // field 70, encoding 4, end group
+ "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
+ "b0063f" + // field 102, encoding 0, 0x3f zigzag32
+ "b8067f" // field 103, encoding 0, 0x7f zigzag64
+
+ o := old()
+ bytes, err := Marshal(pb)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-1 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 1", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = Unmarshal(bytes, pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
+ t.Errorf("unmarshal wrong err msg: %v", err)
+ }
+ bytes, err = Marshal(pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-2 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 2", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+ defer func() {
+ if e := recover(); e != nil {
+ t.Errorf("These bytes caused a panic: %+v", data)
+ t.Logf("Stack:\n%s", debug.Stack())
+ t.FailNow()
+ }
+ }()
+
+ pb := new(MyMessage)
+ Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // b should be the concatenation of these three byte sequences in some order.
+ parts := []string{
+ "\n\a\b\x01\x12\x03Rob",
+ "\n\a\b\x04\x12\x03Ian",
+ "\n\b\b\x08\x12\x04Dave",
+ }
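+ // Decoding the first part: "\n\a" is field 1 (name_mapping), wire type 2,
+ // length 7; "\b\x01" is the map key (field 1, varint 1); "\x12\x03Rob" is
+ // the map value (field 2, length 3, "Rob").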
+ ok := false
+ for i := range parts {
+ for j := range parts {
+ if j == i {
+ continue
+ }
+ for k := range parts {
+ if k == i || k == j {
+ continue
+ }
+ try := parts[i] + parts[j] + parts[k]
+ if bytes.Equal(b, []byte(try)) {
+ ok = true
+ break
+ }
+ }
+ }
+ }
+ if !ok {
+ t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+ }
+ t.Logf("FYI b: %q", b)
+
+ (new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ 0x7001: &FloatingPoint{F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ false: []byte("that's not right!"),
+ true: []byte("aye, 'tis true!"),
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("FYI b: %q", b)
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ for _, pair := range [][2]interface{}{
+ {m.NameMapping, m2.NameMapping},
+ {m.MsgMapping, m2.MsgMapping},
+ {m.ByteMapping, m2.ByteMapping},
+ } {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
+ }
+ }
+}
+
+func TestMapFieldWithNil(t *testing.T) {
+ m1 := &MessageWithMap{
+ MsgMapping: map[int64]*FloatingPoint{
+ 1: nil,
+ },
+ }
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.MsgMapping[1]; !ok {
+ t.Error("msg_mapping[1] not present")
+ } else if v != nil {
+ t.Errorf("msg_mapping[1] not nil: %v", v)
+ }
+}
+
+func TestMapFieldWithNilBytes(t *testing.T) {
+ m1 := &MessageWithMap{
+ ByteMapping: map[bool][]byte{
+ false: []byte{},
+ true: nil,
+ },
+ }
+ n := Size(m1)
+ b, err := Marshal(m1)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if n != len(b) {
+ t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b))
+ }
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
+ }
+ if v, ok := m2.ByteMapping[false]; !ok {
+ t.Error("byte_mapping[false] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[false] not empty: %#v", v)
+ }
+ if v, ok := m2.ByteMapping[true]; !ok {
+ t.Error("byte_mapping[true] not present")
+ } else if len(v) != 0 {
+ t.Errorf("byte_mapping[true] not empty: %#v", v)
+ }
+}
+
+func TestDecodeMapFieldMissingKey(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes
+ // no key
+ 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m"
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing key: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestDecodeMapFieldMissingValue(t *testing.T) {
+ b := []byte{
+ 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes
+ 0x08, 0x01, // varint key, value 1
+ // no value
+ }
+ got := &MessageWithMap{}
+ err := Unmarshal(b, got)
+ if err != nil {
+ t.Fatalf("failed to marshal map with missing value: %v", err)
+ }
+ want := &MessageWithMap{NameMapping: map[int32]string{1: ""}}
+ if !Equal(got, want) {
+ t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want)
+ }
+}
+
+func TestOneof(t *testing.T) {
+ m := &Communique{}
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of empty message with oneof: %v", err)
+ }
+ if len(b) != 0 {
+ t.Errorf("Marshal of empty message yielded too many bytes: %v", b)
+ }
+
+ m = &Communique{
+ Union: &Communique_Name{"Barry"},
+ }
+
+ // Round-trip.
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof: %v", err)
+ }
+ if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)
+ t.Errorf("Incorrect marshal of message with oneof: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof: %v", err)
+ }
+ if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" {
+ t.Errorf("After round trip, Union = %+v", m.Union)
+ }
+ if name := m.GetName(); name != "Barry" {
+ t.Errorf("After round trip, GetName = %q, want %q", name, "Barry")
+ }
+
+ // Let's try with a message in the oneof.
+ m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}}
+ b, err = Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal of message with oneof set to message: %v", err)
+ }
+ if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)
+ t.Errorf("Incorrect marshal of message with oneof set to message: %v", b)
+ }
+ m.Reset()
+ if err := Unmarshal(b, m); err != nil {
+ t.Fatalf("Unmarshal of message with oneof set to message: %v", err)
+ }
+ ss, ok := m.Union.(*Communique_Msg)
+ if !ok || ss.Msg.GetStringField() != "deep deep string" {
+ t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union)
+ }
+}
+
+func TestInefficientPackedBool(t *testing.T) {
+ // https://github.com/golang/protobuf/issues/76
+ inp := []byte{
+ 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes
+ // Usually a bool should take a single byte,
+ // but it is permitted to be any varint.
+ 0xb9, 0x30,
+ }
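+ // 0xb9, 0x30 is the varint 0x39 | 0x30<<7 = 6201; the decoder should
+ // still accept it, treating any nonzero value as true.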
+ if err := Unmarshal(inp, new(MoreRepeated)); err != nil {
+ t.Error(err)
+ }
+}
+
+// Benchmarks
+
+func testMsg() *GoTest {
+ pb := initGoTest(true)
+ const N = 1000 // Internally the library starts much smaller.
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ for i := 0; i < N; i++ {
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_DoubleRepeated[i] = float64(i)
+ }
+ return pb
+}
+
+func bytesMsg() *GoTest {
+ pb := initGoTest(true)
+ buf := make([]byte, 4000)
+ for i := range buf {
+ buf[i] = byte(i)
+ }
+ pb.F_BytesDefaulted = buf
+ return pb
+}
+
+func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
+ d, _ := marshal(pb)
+ b.SetBytes(int64(len(d)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ marshal(pb)
+ }
+}
+
+func benchmarkBufferMarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ p.Reset()
+ err := p.Marshal(pb0)
+ return p.Bytes(), err
+ })
+}
+
+func benchmarkSize(b *testing.B, pb Message) {
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ Size(pb0)
+ return nil, nil
+ })
+}
+
+func newOf(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+ return reflect.New(in.Type().Elem()).Interface().(Message)
+}
+
+func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
+ d, _ := Marshal(pb)
+ b.SetBytes(int64(len(d)))
+ pbd := newOf(pb)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ unmarshal(d, pbd)
+ }
+}
+
+func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
+ p.SetBuf(d)
+ return p.Unmarshal(pb0)
+ })
+}
+
+// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
+
+func BenchmarkMarshal(b *testing.B) {
+ benchmarkMarshal(b, testMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshal(b *testing.B) {
+ benchmarkBufferMarshal(b, testMsg())
+}
+
+func BenchmarkSize(b *testing.B) {
+ benchmarkSize(b, testMsg())
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ benchmarkUnmarshal(b, testMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshal(b *testing.B) {
+ benchmarkBufferUnmarshal(b, testMsg())
+}
+
+func BenchmarkMarshalBytes(b *testing.B) {
+ benchmarkMarshal(b, bytesMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshalBytes(b *testing.B) {
+ benchmarkBufferMarshal(b, bytesMsg())
+}
+
+func BenchmarkSizeBytes(b *testing.B) {
+ benchmarkSize(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalBytes(b *testing.B) {
+ benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshalBytes(b *testing.B) {
+ benchmarkBufferUnmarshal(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
+ b.StopTimer()
+ pb := initGoTestField()
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ pbd := new(GoTestField)
+ p := NewBuffer(nil)
+ p.Marshal(pb)
+ p.Marshal(skip)
+ p2 := NewBuffer(nil)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ p2.SetBuf(p.Bytes())
+ p2.Unmarshal(pbd)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/any_test.go b/vendor/src/github.com/golang/protobuf/proto/any_test.go
new file mode 100644
index 00000000..1a3c22ed
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/any_test.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ testpb "github.com/golang/protobuf/proto/testdata"
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+var (
+ expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
+ expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
+)
+
+// anyEqual reports whether two messages which may be google.protobuf.Any or may
+// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
+// comparison, because semantically equivalent messages may be marshaled to
+// binary in different tag order. Instead, trust that TextMarshaler with
+// ExpandAny option works and compare the text marshaling results.
+func anyEqual(got, want proto.Message) bool {
+ // if messages are proto.Equal, no need to marshal.
+ if proto.Equal(got, want) {
+ return true
+ }
+ g := expandedMarshaler.Text(got)
+ w := expandedMarshaler.Text(want)
+ return g == w
+}
+
+type golden struct {
+ m proto.Message
+ t, c string
+}
+
+var goldenMessages = makeGolden()
+
+func makeGolden() []golden {
+ nested := &pb.Nested{Bunny: "Monty"}
+ nb, err := proto.Marshal(nested)
+ if err != nil {
+ panic(err)
+ }
+ m1 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m2 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
+ }
+ m3 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
+ }
+ m4 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
+ }
+ m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
+
+ any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
+ proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
+ proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
+ any1b, err := proto.Marshal(any1)
+ if err != nil {
+ panic(err)
+ }
+ any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
+ proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
+ any2b, err := proto.Marshal(any2)
+ if err != nil {
+ panic(err)
+ }
+ m6 := &pb.Message{
+ Name: "David",
+ ResultCount: 47,
+ Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ ManyThings: []*anypb.Any{
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
+ &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
+ },
+ }
+
+ const (
+ m1Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m2Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m3Golden = `
+name: "David"
+result_count: 47
+anything: <
+ ["type.googleapis.com/\"/proto3_proto.Nested"]: <
+ bunny: "Monty"
+ >
+>
+`
+ m4Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+>
+`
+ m5Golden = `
+[type.googleapis.com/proto3_proto.Nested]: <
+ bunny: "Monty"
+>
+`
+ m6Golden = `
+name: "David"
+result_count: 47
+anything: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 47
+ name: "David"
+ [testdata.Ext.more]: <
+ data: "foo"
+ >
+ [testdata.Ext.text]: "bar"
+ >
+>
+many_things: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 42
+ bikeshed: GREEN
+ rep_bytes: "roboto"
+ [testdata.Ext.more]: <
+ data: "baz"
+ >
+ >
+>
+many_things: <
+ [type.googleapis.com/testdata.MyMessage]: <
+ count: 47
+ name: "David"
+ [testdata.Ext.more]: <
+ data: "foo"
+ >
+ [testdata.Ext.text]: "bar"
+ >
+>
+`
+ )
+ return []golden{
+ {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
+ {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
+ {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
+ {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
+ {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
+ {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
+ }
+}
+
+func TestMarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
+ t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
+ }
+ if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
+ t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
+ }
+ }
+}
+
+func TestUnmarshalGolden(t *testing.T) {
+ for _, tt := range goldenMessages {
+ want := tt.m
+ got := proto.Clone(tt.m)
+ got.Reset()
+ if err := proto.UnmarshalText(tt.t, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
+ }
+ got.Reset()
+ if err := proto.UnmarshalText(tt.c, got); err != nil {
+ t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
+ }
+ if !anyEqual(got, want) {
+ t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
+ }
+ }
+}
+
+func TestMarshalUnknownAny(t *testing.T) {
+ m := &pb.Message{
+ Anything: &anypb.Any{
+ TypeUrl: "foo",
+ Value: []byte("bar"),
+ },
+ }
+ want := `anything: <
+ type_url: "foo"
+ value: "bar"
+>
+`
+ got := expandedMarshaler.Text(m)
+ if got != want {
+ t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
+ }
+}
+
+func TestAmbiguousAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ type_url: "ttt/proto3_proto.Nested"
+ value: "\n\x05Monty"
+ `, pb)
+ t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
+ if err != nil {
+ t.Errorf("failed to parse ambiguous Any message: %v", err)
+ }
+}
+
+func TestUnmarshalOverwriteAny(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Monty"
+ >
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 7: Any message unpacked multiple times, or "type_url" already set`
+ if err == nil || err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err, want)
+ }
+}
+
+func TestUnmarshalAnyMixAndMatch(t *testing.T) {
+ pb := &anypb.Any{}
+ err := proto.UnmarshalText(`
+ value: "\n\x05Monty"
+ [type.googleapis.com/a/path/proto3_proto.Nested]: <
+ bunny: "Rabbit of Caerbannog"
+ >
+ `, pb)
+ want := `line 5: Any message unpacked multiple times, or "value" already set`
+ if err == nil || err.Error() != want {
+ t.Errorf("incorrect error.\nHave: %v\nWant: %v", err, want)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/clone.go b/vendor/src/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..e392575b
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
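+// For example, merging src = &MyMessage{Count: Int32(42)} into
+// dst = &MyMessage{Name: String("Dave")} leaves dst with both Count and
+// Name set; clone_test.go exercises this and several trickier cases.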
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/clone_test.go b/vendor/src/github.com/golang/protobuf/proto/clone_test.go
new file mode 100644
index 00000000..f607ff49
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/clone_test.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+var cloneTestMessage = &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+}
+
+func init() {
+ ext := &pb.Ext{
+ Data: proto.String("extension"),
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+}
+
+func TestClone(t *testing.T) {
+ m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
+ if !proto.Equal(m, cloneTestMessage) {
+ t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
+ }
+
+ // Verify it was a deep copy.
+ *m.Inner.Port++
+ if proto.Equal(m, cloneTestMessage) {
+ t.Error("Mutating clone changed the original")
+ }
+ // Byte fields and repeated fields should be copied.
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] {
+ t.Error("Pet: repeated field not copied")
+ }
+ if &m.Others[0] == &cloneTestMessage.Others[0] {
+ t.Error("Others: repeated field not copied")
+ }
+ if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
+ t.Error("Others[0].Value: bytes field not copied")
+ }
+ if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
+ t.Error("RepBytes: repeated field not copied")
+ }
+ if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
+ t.Error("RepBytes[0]: bytes field not copied")
+ }
+}
+
+func TestCloneNil(t *testing.T) {
+ var m *pb.MyMessage
+ if c := proto.Clone(m); !proto.Equal(m, c) {
+ t.Errorf("Clone(%v) = %v", m, c)
+ }
+}
+
+var mergeTests = []struct {
+ src, dst, want proto.Message
+}{
+ {
+ src: &pb.MyMessage{
+ Count: proto.Int32(42),
+ },
+ dst: &pb.MyMessage{
+ Name: proto.String("Dave"),
+ },
+ want: &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ },
+ Pet: []string{"horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ dst: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {
+ // Explicitly test a src=nil field
+ Inner: nil,
+ },
+ },
+ },
+ want: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {},
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ RepBytes: [][]byte{[]byte("wow")},
+ },
+ dst: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham")},
+ },
+ want: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+ },
+ },
+ // Check that a scalar bytes field replaces rather than appends.
+ {
+ src: &pb.OtherMessage{Value: []byte("foo")},
+ dst: &pb.OtherMessage{Value: []byte("bar")},
+ want: &pb.OtherMessage{Value: []byte("foo")},
+ },
+ {
+ src: &pb.MessageWithMap{
+ NameMapping: map[int32]string{6: "Nigel"},
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ dst: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Bruce", // should be overwritten
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(3.0),
+ Exact: proto.Bool(true),
+ }, // the entire message should be overwritten
+ },
+ },
+ want: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Nigel",
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ 0x4002: &pb.FloatingPoint{
+ F: proto.Float64(2.0),
+ },
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ },
+ // proto3 shouldn't merge zero values,
+ // in the same way that proto2 shouldn't merge nils.
+ {
+ src: &proto3pb.Message{
+ Name: "Aaron",
+ Data: []byte(""), // zero value, but not nil
+ },
+ dst: &proto3pb.Message{
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ want: &proto3pb.Message{
+ Name: "Aaron",
+ HeightInCm: 176,
+ Data: []byte("texas!"),
+ },
+ },
+ // Oneof fields should merge by assignment.
+ {
+ src: &pb.Communique{
+ Union: &pb.Communique_Number{41},
+ },
+ dst: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ want: &pb.Communique{
+ Union: &pb.Communique_Number{41},
+ },
+ },
+ // Oneof nil is the same as not set.
+ {
+ src: &pb.Communique{},
+ dst: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ want: &pb.Communique{
+ Union: &pb.Communique_Name{"Bobby Tables"},
+ },
+ },
+ {
+ src: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true}, // replace
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
+ },
+ },
+ dst: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
+ },
+ },
+ want: &proto3pb.Message{
+ Terrain: map[string]*proto3pb.Nested{
+ "kay_a": &proto3pb.Nested{Cute: true},
+ "kay_b": &proto3pb.Nested{Bunny: "rabbit"},
+ "kay_c": &proto3pb.Nested{Bunny: "bunny"},
+ },
+ },
+ },
+}
+
+func TestMerge(t *testing.T) {
+ for _, m := range mergeTests {
+ got := proto.Clone(m.dst)
+ proto.Merge(got, m.src)
+ if !proto.Equal(got, m.want) {
+ t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
+ }
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/decode.go b/vendor/src/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..aa207298
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if the buffer does not contain a complete varint.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
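+//
+// For example, DecodeVarint([]byte{0xac, 0x02}) returns (300, 2):
+// 0xac contributes its low seven bits (0x2c = 44) and sets the
+// continuation bit, and 0x02 contributes 2<<7 = 256; 44 + 256 = 300.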
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
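+
+// Example: fixed64 is plain little-endian, so the eight wire bytes
+// 01 00 00 00 00 00 00 00 decode to 1:
+//
+//   b := NewBuffer([]byte{0x01, 0, 0, 0, 0, 0, 0, 0})
+//   x, err := b.DecodeFixed64() // x == 1, err == nil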
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
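+
+// Example: zigzag interleaves signed values so small magnitudes stay small
+// on the wire: 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ... Decoding
+// inverts the mapping, so the single varint byte 0x03 yields -2:
+//
+//   b := NewBuffer([]byte{0x03})
+//   x, _ := b.DecodeZigzag64() // int64(x) == -2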
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check if we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
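+
+// Example: a length-delimited body is a varint byte count followed by that
+// many bytes, so the wire bytes 02 68 69 decode to "hi":
+//
+//   b := NewBuffer([]byte{0x02, 'h', 'i'})
+//   s, _ := b.DecodeStringBytes() // s == "hi"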
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
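+
+// Example: decoding the same bytes twice shows the difference between the
+// two entry points; Unmarshal resets first, UnmarshalMerge does not:
+//
+//   _ = Unmarshal(data, msg)      // msg holds one copy of each element
+//   _ = UnmarshalMerge(data, msg) // repeated fields now appear twice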
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/decode_test.go b/vendor/src/github.com/golang/protobuf/proto/decode_test.go
new file mode 100644
index 00000000..b1f13044
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/decode_test.go
@@ -0,0 +1,256 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+var (
+ bytesBlackhole []byte
+ msgBlackhole = new(tpb.Message)
+)
+
+// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
+// 2 bytes long).
+func BenchmarkVarint32ArraySmall(b *testing.B) {
+ for i := uint(1); i <= 10; i++ {
+ dist := genInt32Dist([7]int{0, 3, 1}, 1<<i)
diff --git a/vendor/src/github.com/golang/protobuf/proto/encode.go b/vendor/src/github.com/golang/protobuf/proto/encode.go
new file mode 100644
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/encode.go
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
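+
+// Example: EncodeVarint and SizeVarint agree with DecodeVarint in
+// decode.go, e.g.
+//
+//   EncodeVarint(300) // []byte{0xAC, 0x02}
+//   SizeVarint(300)   // 2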
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
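+
+// Example: on success the encoded length always matches Size, so callers
+// can pre-validate or pre-allocate:
+//
+//   data, err := Marshal(msg)
+//   // err == nil implies len(data) == Size(msg)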
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
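+
+// Example: for map[string]int32{"a": 1} stored in field number 4, each
+// entry is emitted as a little embedded message:
+//
+//   0x22 0x05 0x0a 0x01 'a' 0x10 0x01
+//   tag 4, bytes; len 5; key: tag 1, bytes, len 1, "a"; value: tag 2, varint, 1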
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new addressable reflect.Values for the map's
+// key and value types, along with structPointers suitable for passing to
+// an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) // keyptr is now an addressable *K pointing at keycopy
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) // valptr is now an addressable *V pointing at valcopy
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
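+
+// Example: four reserved bytes hold any length varint below 1<<28, so the
+// common case is a small shift left. For a 10-byte message body, lMsg = 10
+// and lLen = 1, hence x = 1-4 = -3: the body is copied 3 bytes left and
+// the buffer trimmed by 3.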
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/encode_test.go b/vendor/src/github.com/golang/protobuf/proto/encode_test.go
new file mode 100644
index 00000000..0b36a0e9
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/encode_test.go
@@ -0,0 +1,83 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ tpb "github.com/golang/protobuf/proto/proto3_proto"
+ "github.com/golang/protobuf/ptypes"
+)
+
+var (
+ blackhole []byte
+)
+
+// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
+// same.
+func BenchmarkAny(b *testing.B) {
+ data := make([]byte, 1<<20)
+ quantum := 1 << 10
+ for i := uint(0); i <= 10; i++ {
+ b.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
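+
+// Illustrative sketch, not part of the upstream file (MyMessage and the
+// descriptor are placeholders for generated code): generation registers
+// each descriptor once at init, after which RegisteredExtensions recovers
+// it by field number.
+//
+//	func init() { RegisterExtension(E_Ext_More) }
+//	desc := RegisteredExtensions((*MyMessage)(nil))[E_Ext_More.Field]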
diff --git a/vendor/src/github.com/golang/protobuf/proto/extensions_test.go b/vendor/src/github.com/golang/protobuf/proto/extensions_test.go
new file mode 100644
index 00000000..b6d9114c
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/extensions_test.go
@@ -0,0 +1,536 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestGetExtensionsWithMissingExtensions(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Fatalf("Could not set ext1: %s", err)
+ }
+ exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
+ pb.E_Ext_More,
+ pb.E_Ext_Text,
+ })
+ if err != nil {
+ t.Fatalf("GetExtensions() failed: %s", err)
+ }
+ if exts[0] != ext1 {
+ t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
+ }
+ if exts[1] != nil {
+ t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
+ }
+}
+
+func TestExtensionDescsWithMissingExtensions(t *testing.T) {
+ msg := &pb.MyMessage{Count: proto.Int32(0)}
+ extdesc1 := pb.E_Ext_More
+ if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
+ t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
+ }
+
+ ext1 := &pb.Ext{}
+ if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
+ t.Fatalf("Could not set ext1: %s", err)
+ }
+ extdesc2 := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 123456789,
+ Name: "a.b",
+ Tag: "varint,123456789,opt",
+ }
+ ext2 := proto.Bool(false)
+ if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
+ t.Fatalf("Could not set ext2: %s", err)
+ }
+
+ b, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("Could not marshal msg: %v", err)
+ }
+ if err := proto.Unmarshal(b, msg); err != nil {
+ t.Fatalf("Could not unmarshal into msg: %v", err)
+ }
+
+ descs, err := proto.ExtensionDescs(msg)
+ if err != nil {
+ t.Fatalf("proto.ExtensionDescs: got error %v", err)
+ }
+ sortExtDescs(descs)
+ wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
+ if !reflect.DeepEqual(descs, wantDescs) {
+ t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
+ }
+}
+
+type ExtensionDescSlice []*proto.ExtensionDesc
+
+func (s ExtensionDescSlice) Len() int { return len(s) }
+func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
+func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func sortExtDescs(s []*proto.ExtensionDesc) {
+ sort.Sort(ExtensionDescSlice(s))
+}
+
+func TestGetExtensionStability(t *testing.T) {
+ check := func(m *pb.MyMessage) bool {
+ ext1, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ ext2, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ return ext1 == ext2
+ }
+ msg := &pb.MyMessage{Count: proto.Int32(4)}
+ ext0 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
+ t.Fatalf("Could not set ext1: %s", ext0)
+ }
+ if !check(msg) {
+ t.Errorf("GetExtension() not stable before marshaling")
+ }
+ bb, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("Marshal() failed: %s", err)
+ }
+ msg1 := &pb.MyMessage{}
+ err = proto.Unmarshal(bb, msg1)
+ if err != nil {
+ t.Fatalf("Unmarshal() failed: %s", err)
+ }
+ if !check(msg1) {
+ t.Errorf("GetExtension() not stable after unmarshaling")
+ }
+}
+
+func TestGetExtensionDefaults(t *testing.T) {
+ var setFloat64 float64 = 1
+ var setFloat32 float32 = 2
+ var setInt32 int32 = 3
+ var setInt64 int64 = 4
+ var setUint32 uint32 = 5
+ var setUint64 uint64 = 6
+ var setBool = true
+ var setBool2 = false
+ var setString = "Goodnight string"
+ var setBytes = []byte("Goodnight bytes")
+ var setEnum = pb.DefaultsMessage_TWO
+
+ type testcase struct {
+ ext *proto.ExtensionDesc // Extension we are testing.
+ want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
+ def interface{} // Expected value of extension after ClearExtension().
+ }
+ tests := []testcase{
+ {pb.E_NoDefaultDouble, setFloat64, nil},
+ {pb.E_NoDefaultFloat, setFloat32, nil},
+ {pb.E_NoDefaultInt32, setInt32, nil},
+ {pb.E_NoDefaultInt64, setInt64, nil},
+ {pb.E_NoDefaultUint32, setUint32, nil},
+ {pb.E_NoDefaultUint64, setUint64, nil},
+ {pb.E_NoDefaultSint32, setInt32, nil},
+ {pb.E_NoDefaultSint64, setInt64, nil},
+ {pb.E_NoDefaultFixed32, setUint32, nil},
+ {pb.E_NoDefaultFixed64, setUint64, nil},
+ {pb.E_NoDefaultSfixed32, setInt32, nil},
+ {pb.E_NoDefaultSfixed64, setInt64, nil},
+ {pb.E_NoDefaultBool, setBool, nil},
+ {pb.E_NoDefaultBool, setBool2, nil},
+ {pb.E_NoDefaultString, setString, nil},
+ {pb.E_NoDefaultBytes, setBytes, nil},
+ {pb.E_NoDefaultEnum, setEnum, nil},
+ {pb.E_DefaultDouble, setFloat64, float64(3.1415)},
+ {pb.E_DefaultFloat, setFloat32, float32(3.14)},
+ {pb.E_DefaultInt32, setInt32, int32(42)},
+ {pb.E_DefaultInt64, setInt64, int64(43)},
+ {pb.E_DefaultUint32, setUint32, uint32(44)},
+ {pb.E_DefaultUint64, setUint64, uint64(45)},
+ {pb.E_DefaultSint32, setInt32, int32(46)},
+ {pb.E_DefaultSint64, setInt64, int64(47)},
+ {pb.E_DefaultFixed32, setUint32, uint32(48)},
+ {pb.E_DefaultFixed64, setUint64, uint64(49)},
+ {pb.E_DefaultSfixed32, setInt32, int32(50)},
+ {pb.E_DefaultSfixed64, setInt64, int64(51)},
+ {pb.E_DefaultBool, setBool, true},
+ {pb.E_DefaultBool, setBool2, true},
+ {pb.E_DefaultString, setString, "Hello, string"},
+ {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
+ {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
+ }
+
+ checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
+ val, err := proto.GetExtension(msg, test.ext)
+ if err != nil {
+ if valWant != nil {
+ return fmt.Errorf("GetExtension(): %s", err)
+ }
+ if want := proto.ErrMissingExtension; err != want {
+ return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
+ }
+ return nil
+ }
+
+ // All proto2 extension values are either a pointer to a value or a slice of values.
+ ty := reflect.TypeOf(val)
+ tyWant := reflect.TypeOf(test.ext.ExtensionType)
+ if got, want := ty, tyWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
+ }
+ tye := ty.Elem()
+ tyeWant := tyWant.Elem()
+ if got, want := tye, tyeWant; got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
+ }
+
+ // Check the name of the type of the value.
+ // If it is an enum it will be type int32 with the name of the enum.
+ if got, want := tye.Name(), tyeWant.Name(); got != want {
+ return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
+ }
+
+ // Check that value is what we expect.
+ // If we have a pointer in val, get the value it points to.
+ valExp := val
+ if ty.Kind() == reflect.Ptr {
+ valExp = reflect.ValueOf(val).Elem().Interface()
+ }
+ if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
+ return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
+ }
+
+ return nil
+ }
+
+ setTo := func(test testcase) interface{} {
+ setTo := reflect.ValueOf(test.want)
+ if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
+ setTo = reflect.New(typ).Elem()
+ setTo.Set(reflect.New(setTo.Type().Elem()))
+ setTo.Elem().Set(reflect.ValueOf(test.want))
+ }
+ return setTo.Interface()
+ }
+
+ for _, test := range tests {
+ msg := &pb.DefaultsMessage{}
+ name := test.ext.Name
+
+ // Check the initial value.
+ if err := checkVal(test, msg, test.def); err != nil {
+ t.Errorf("%s: %v", name, err)
+ }
+
+ // Set the per-type value and check value.
+ name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
+ if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
+ t.Errorf("%s: SetExtension(): %v", name, err)
+ continue
+ }
+ if err := checkVal(test, msg, test.want); err != nil {
+ t.Errorf("%s: %v", name, err)
+ continue
+ }
+
+ // Clear the extension and check the default value.
+ name += " (cleared)"
+ proto.ClearExtension(msg, test.ext)
+ if err := checkVal(test, msg, test.def); err != nil {
+ t.Errorf("%s: %v", name, err)
+ }
+ }
+}
+
+func TestExtensionsRoundTrip(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{
+ Data: proto.String("hi"),
+ }
+ ext2 := &pb.Ext{
+ Data: proto.String("there"),
+ }
+ exists := proto.HasExtension(msg, pb.E_Ext_More)
+ if exists {
+ t.Error("Extension More present unexpectedly")
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Error(err)
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
+ t.Error(err)
+ }
+ e, err := proto.GetExtension(msg, pb.E_Ext_More)
+ if err != nil {
+ t.Error(err)
+ }
+ x, ok := e.(*pb.Ext)
+ if !ok {
+ t.Errorf("e has type %T, expected testdata.Ext", e)
+ } else if *x.Data != "there" {
+ t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
+ }
+ proto.ClearExtension(msg, pb.E_Ext_More)
+ if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
+ t.Errorf("got %v, expected ErrMissingExtension", e)
+ }
+ if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
+ t.Error("expected bad extension error, got nil")
+ }
+ if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
+ t.Error("expected extension err")
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
+ t.Error("expected some sort of type mismatch error, got nil")
+ }
+}
+
+func TestNilExtension(t *testing.T) {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(1),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
+ t.Fatal(err)
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
+ t.Error("expected SetExtension to fail due to a nil extension")
+ } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
+ t.Errorf("expected error %v, got %v", want, err)
+ }
+ // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
+ // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
+}
+
+func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
+ // Add a repeated extension to the result.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ // Marshal message with a repeated extension.
+ msg1 := new(pb.OtherMessage)
+ err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension: %v", test.name, err)
+ }
+ b, err := proto.Marshal(msg1)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err = proto.Unmarshal(b, msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_RComplex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.([]*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ if !reflect.DeepEqual(ext, test.ext) {
+ t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
+ }
+ }
+}
+
+func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
+ // We may see multiple instances of the same extension in the wire
+ // format. For example, the proto compiler may encode custom options in
+ // this way. Here, we verify that we merge the extensions together.
+ tests := []struct {
+ name string
+ ext []*pb.ComplexExtension
+ }{
+ {
+ "two fields",
+ []*pb.ComplexExtension{
+ {First: proto.Int32(7)},
+ {Second: proto.Int32(11)},
+ },
+ },
+ {
+ "repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {Third: []int32{2000}},
+ },
+ },
+ {
+ "two fields and repeated field",
+ []*pb.ComplexExtension{
+ {Third: []int32{1000}},
+ {First: proto.Int32(9)},
+ {Second: proto.Int32(21)},
+ {Third: []int32{2000}},
+ },
+ },
+ }
+ for _, test := range tests {
+ var buf bytes.Buffer
+ var want pb.ComplexExtension
+
+ // Generate a serialized representation of a repeated extension
+ // by concatenating bytes together.
+ for i, e := range test.ext {
+ // Merge to create the wanted proto.
+ proto.Merge(&want, e)
+
+ // serialize the message
+ msg := new(pb.OtherMessage)
+ err := proto.SetExtension(msg, pb.E_Complex, e)
+ if err != nil {
+ t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
+ }
+ b, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
+ }
+ buf.Write(b)
+ }
+
+ // Unmarshal and read the merged proto.
+ msg2 := new(pb.OtherMessage)
+ err := proto.Unmarshal(buf.Bytes(), msg2)
+ if err != nil {
+ t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
+ }
+ e, err := proto.GetExtension(msg2, pb.E_Complex)
+ if err != nil {
+ t.Fatalf("[%s] Error getting extension: %v", test.name, err)
+ }
+ ext := e.(*pb.ComplexExtension)
+ if ext == nil {
+ t.Fatalf("[%s] Invalid extension", test.name)
+ }
+ if !reflect.DeepEqual(*ext, want) {
+ t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
+ }
+ }
+}
+
+func TestClearAllExtensions(t *testing.T) {
+ // unregistered extension
+ desc := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 101010100,
+ Name: "emptyextension",
+ Tag: "varint,0,opt",
+ }
+ m := &pb.MyMessage{}
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+ if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
+ t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+ if !proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
+ }
+ proto.ClearAllExtensions(m)
+ if proto.HasExtension(m, desc) {
+ t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
+ }
+}
+
+func TestMarshalRace(t *testing.T) {
+ // unregistered extension
+ desc := &proto.ExtensionDesc{
+ ExtendedType: (*pb.MyMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 101010100,
+ Name: "emptyextension",
+ Tag: "varint,0,opt",
+ }
+
+ m := &pb.MyMessage{Count: proto.Int32(4)}
+ if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
+ t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
+ }
+
+ var g errgroup.Group
+ for n := 3; n > 0; n-- {
+ g.Go(func() error {
+ _, err := proto.Marshal(m)
+ return err
+ })
+ }
+ if err := g.Wait(); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/lib.go b/vendor/src/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..ac4ddbc0
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
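+
+// bufferReuseSketch is an illustrative sketch, not part of the upstream
+// file: it shows the reuse pattern the type comment above describes. Bytes
+// aliases the internal slice, so each result is copied before the next
+// Reset reclaims the backing array.
+func bufferReuseSketch(msgs []Message) ([][]byte, error) {
+ var buf Buffer
+ out := make([][]byte, 0, len(msgs))
+ for _, m := range msgs {
+ buf.Reset() // keep the backing array, drop the contents
+ if err := buf.Marshal(m); err != nil {
+ return nil, err
+ }
+ out = append(out, append([]byte(nil), buf.Bytes()...))
+ }
+ return out, nil
+}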
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
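+
+// unmarshalJSONEnumSketch is an illustrative sketch, not part of the
+// upstream file (the enum map mirrors the FOO example in the package
+// comment): both accepted JSON representations decode to the same value.
+func unmarshalJSONEnumSketch() bool {
+ m := map[string]int32{"X": 17}
+ sym, _ := UnmarshalJSONEnum(m, []byte(`"X"`), "FOO") // new style: string
+ num, _ := UnmarshalJSONEnum(m, []byte(`17`), "FOO") // old style: int
+ return sym == num // true: both yield 17
+}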
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
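+
+// Illustrative sketch, not part of the upstream file (pb.Test and its
+// default of 77 come from the package example above):
+//
+//	msg := &pb.Test{Label: proto.String("x")} // Type left unset
+//	proto.SetDefaults(msg)
+//	// *msg.Type is now 77, the proto-declared default.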
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its default-value
+ // information: which scalar fields carry proto-declared defaults and
+ // which nested fields need recursive handling.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
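+
+// mapKeysSketch is an illustrative sketch, not part of the upstream file:
+// int32 keys sort numerically, per the type specialization above, before a
+// map field is encoded deterministically.
+func mapKeysSketch() []reflect.Value {
+ keys := reflect.ValueOf(map[int32]string{3: "c", 1: "a", 2: "b"}).MapKeys()
+ sort.Sort(mapKeys(keys)) // numeric order: 1, 2, 3
+ return keys
+}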
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/src/github.com/golang/protobuf/proto/map_test.go b/vendor/src/github.com/golang/protobuf/proto/map_test.go
new file mode 100644
index 00000000..313e8792
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/map_test.go
@@ -0,0 +1,46 @@
+package proto_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ ppb "github.com/golang/protobuf/proto/proto3_proto"
+)
+
+func marshalled() []byte {
+ m := &ppb.IntMaps{}
+ for i := 0; i < 1000; i++ {
+ m.Maps = append(m.Maps, &ppb.IntMap{
+ Rtt: map[int32]int32{1: 2},
+ })
+ }
+ b, err := proto.Marshal(m)
+ if err != nil {
+ panic(fmt.Sprintf("Can't marshal %+v: %v", m, err))
+ }
+ return b
+}
+
+func BenchmarkConcurrentMapUnmarshal(b *testing.B) {
+ in := marshalled()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ var out ppb.IntMaps
+ if err := proto.Unmarshal(in, &out); err != nil {
+ b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+ }
+ }
+ })
+}
+
+func BenchmarkSequentialMapUnmarshal(b *testing.B) {
+ in := marshalled()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var out ppb.IntMaps
+ if err := proto.Unmarshal(in, &out); err != nil {
+ b.Errorf("Can't unmarshal ppb.IntMaps: %v", err)
+ }
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/message_set.go b/vendor/src/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..fd982dec
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ if ms.find(pb) != nil {
+ return true
+ }
+ return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ emitted := false
+ for _, id := range ids {
+ ext := m[id]
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ // Write the separator only between entries that are actually rendered;
+ // otherwise a skipped first entry would yield malformed JSON.
+ if emitted {
+ b.WriteByte(',')
+ }
+ emitted = true
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/message_set_test.go b/vendor/src/github.com/golang/protobuf/proto/message_set_test.go
new file mode 100644
index 00000000..353a3ea7
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/message_set_test.go
@@ -0,0 +1,66 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
+ // Check that a repeated message set entry will be concatenated.
+ in := &messageSet{
+ Item: []*_MessageSet_Item{
+ {TypeId: Int32(12345), Message: []byte("hoo")},
+ {TypeId: Int32(12345), Message: []byte("hah")},
+ },
+ }
+ b, err := Marshal(in)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("Marshaled bytes: %q", b)
+
+ var extensions XXX_InternalExtensions
+ if err := UnmarshalMessageSet(b, &extensions); err != nil {
+ t.Fatalf("UnmarshalMessageSet: %v", err)
+ }
+ ext, ok := extensions.p.extensionMap[12345]
+ if !ok {
+ t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
+ }
+ // Skip wire type/field number and length varints.
+ got := skipVarint(skipVarint(ext.enc))
+ if want := []byte("hoohah"); !bytes.Equal(got, want) {
+ t.Errorf("Combined extension is %q, want %q", got, want)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..fb512e2e
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..6b5567d4
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
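+
+// Illustrative example (not part of the upstream file): with fields as byte
+// offsets, every accessor below is plain pointer arithmetic. For a
+// hypothetical type T struct{ A int32; B *uint32 }, B sits at offset 8 on
+// 64-bit platforms (A fills bytes 0-3, then padding for pointer alignment),
+// so structPointer_Word32(p, field(8)) yields the address of B.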
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/properties.go b/vendor/src/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..ec2289c0
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the encoding and decoding properties
+ * of protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
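+
+// Illustrative example (not part of the upstream file): put(3, 0) grows
+// fastTags to {-1, -1, -1, 0} and a later get(3) returns (0, true) without
+// touching the map; a tag of tagMapFastLimit or above goes through slowTags
+// instead.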
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
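+
+// Illustrative example (not part of the upstream file): the tag
+//   "bytes,49,opt,name=foo,def=hello!"
+// parses to Wire="bytes", WireType=WireBytes, Tag=49, Optional=true,
+// OrigName="foo", HasDefault=true, Default="hello!". Because commas in
+// defaults are not escaped, def=hello,world rejoins the trailing fields and
+// yields Default="hello,world".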
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
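+
+// Illustrative example (not part of the upstream file): the precalculated
+// tagcode is the varint encoding of Tag<<3|wiretype. Tag=3 with WireBytes
+// gives 3<<3|2 = 0x1a, a single byte; Tag=17 with WireVarint gives
+// 17<<3|0 = 136, which encodes as the two bytes 0x88 0x01.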
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
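+
+// Illustrative usage (not part of the upstream file): callers pass the
+// struct type itself rather than a pointer to it, e.g.
+//   sprop := proto.GetProperties(reflect.TypeOf(pb).Elem())
+// The read-lock fast path above keeps repeated lookups cheap; only the
+// first lookup for a type takes the write lock.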
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
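+
+// Illustrative usage (not part of the upstream file): generated packages
+// typically register their enums from an init function, e.g.
+//   proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
+// after which EnumValueMap("proto3_proto.Message_Humour")["PUNS"] == 1.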
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
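+
+// Illustrative usage (not part of the upstream file): generated code
+// typically calls
+//   proto.RegisterType((*Message)(nil), "proto3_proto.Message")
+// from an init function, after which MessageType("proto3_proto.Message")
+// returns the registered pointer-to-struct type.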
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
new file mode 100644
index 00000000..cc4d0489
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-go.
+// source: proto3_proto/proto3.proto
+// DO NOT EDIT!
+
+/*
+Package proto3_proto is a generated protocol buffer package.
+
+It is generated from these files:
+ proto3_proto/proto3.proto
+
+It has these top-level messages:
+ Message
+ Nested
+ MessageWithMap
+ IntMap
+ IntMaps
+*/
+package proto3_proto
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import testdata "github.com/golang/protobuf/proto/testdata"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Message_Humour int32
+
+const (
+ Message_UNKNOWN Message_Humour = 0
+ Message_PUNS Message_Humour = 1
+ Message_SLAPSTICK Message_Humour = 2
+ Message_BILL_BAILEY Message_Humour = 3
+)
+
+var Message_Humour_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "PUNS",
+ 2: "SLAPSTICK",
+ 3: "BILL_BAILEY",
+}
+var Message_Humour_value = map[string]int32{
+ "UNKNOWN": 0,
+ "PUNS": 1,
+ "SLAPSTICK": 2,
+ "BILL_BAILEY": 3,
+}
+
+func (x Message_Humour) String() string {
+ return proto.EnumName(Message_Humour_name, int32(x))
+}
+func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
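+
+// For example, Message_PUNS.String() looks up entry 1 in the name table
+// above and returns "PUNS".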
+
+type Message struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
+ HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"`
+ Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+ ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"`
+ TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"`
+ Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
+ Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"`
+ ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"`
+ Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
+ RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"`
+ Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"`
+ Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"`
+ ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"`
+ Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"`
+ Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Message) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Message) GetHilarity() Message_Humour {
+ if m != nil {
+ return m.Hilarity
+ }
+ return Message_UNKNOWN
+}
+
+func (m *Message) GetHeightInCm() uint32 {
+ if m != nil {
+ return m.HeightInCm
+ }
+ return 0
+}
+
+func (m *Message) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *Message) GetResultCount() int64 {
+ if m != nil {
+ return m.ResultCount
+ }
+ return 0
+}
+
+func (m *Message) GetTrueScotsman() bool {
+ if m != nil {
+ return m.TrueScotsman
+ }
+ return false
+}
+
+func (m *Message) GetScore() float32 {
+ if m != nil {
+ return m.Score
+ }
+ return 0
+}
+
+func (m *Message) GetKey() []uint64 {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Message) GetShortKey() []int32 {
+ if m != nil {
+ return m.ShortKey
+ }
+ return nil
+}
+
+func (m *Message) GetNested() *Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *Message) GetRFunny() []Message_Humour {
+ if m != nil {
+ return m.RFunny
+ }
+ return nil
+}
+
+func (m *Message) GetTerrain() map[string]*Nested {
+ if m != nil {
+ return m.Terrain
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Field() *testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Field
+ }
+ return nil
+}
+
+func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
+ if m != nil {
+ return m.Proto2Value
+ }
+ return nil
+}
+
+func (m *Message) GetAnything() *google_protobuf.Any {
+ if m != nil {
+ return m.Anything
+ }
+ return nil
+}
+
+func (m *Message) GetManyThings() []*google_protobuf.Any {
+ if m != nil {
+ return m.ManyThings
+ }
+ return nil
+}
+
+func (m *Message) GetSubmessage() *Message {
+ if m != nil {
+ return m.Submessage
+ }
+ return nil
+}
+
+func (m *Message) GetChildren() []*Message {
+ if m != nil {
+ return m.Children
+ }
+ return nil
+}
+
+type Nested struct {
+ Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
+ Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"`
+}
+
+func (m *Nested) Reset() { *m = Nested{} }
+func (m *Nested) String() string { return proto.CompactTextString(m) }
+func (*Nested) ProtoMessage() {}
+func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Nested) GetBunny() string {
+ if m != nil {
+ return m.Bunny
+ }
+ return ""
+}
+
+func (m *Nested) GetCute() bool {
+ if m != nil {
+ return m.Cute
+ }
+ return false
+}
+
+type MessageWithMap struct {
+ ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+type IntMap struct {
+ Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+}
+
+func (m *IntMap) Reset() { *m = IntMap{} }
+func (m *IntMap) String() string { return proto.CompactTextString(m) }
+func (*IntMap) ProtoMessage() {}
+func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *IntMap) GetRtt() map[int32]int32 {
+ if m != nil {
+ return m.Rtt
+ }
+ return nil
+}
+
+type IntMaps struct {
+ Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"`
+}
+
+func (m *IntMaps) Reset() { *m = IntMaps{} }
+func (m *IntMaps) String() string { return proto.CompactTextString(m) }
+func (*IntMaps) ProtoMessage() {}
+func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *IntMaps) GetMaps() []*IntMap {
+ if m != nil {
+ return m.Maps
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Message)(nil), "proto3_proto.Message")
+ proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
+ proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
+ proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap")
+ proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps")
+ proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
+}
+
+func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 733 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34,
+ 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a,
+ 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98,
+ 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f,
+ 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3,
+ 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8,
+ 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef,
+ 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7,
+ 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35,
+ 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc,
+ 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5,
+ 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c,
+ 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58,
+ 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61,
+ 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8,
+ 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71,
+ 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a,
+ 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88,
+ 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35,
+ 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63,
+ 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a,
+ 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78,
+ 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2,
+ 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62,
+ 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f,
+ 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11,
+ 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97,
+ 0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76,
+ 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8,
+ 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19,
+ 0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3,
+ 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0,
+ 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d,
+ 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1,
+ 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9,
+ 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c,
+ 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca,
+ 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51,
+ 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0,
+ 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7,
+ 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05,
+ 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09,
+ 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47,
+ 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91,
+ 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde,
+ 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
new file mode 100644
index 00000000..20486557
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
@@ -0,0 +1,87 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+import "testdata/test.proto";
+
+package proto3_proto;
+
+message Message {
+ enum Humour {
+ UNKNOWN = 0;
+ PUNS = 1;
+ SLAPSTICK = 2;
+ BILL_BAILEY = 3;
+ }
+
+ string name = 1;
+ Humour hilarity = 2;
+ uint32 height_in_cm = 3;
+ bytes data = 4;
+ int64 result_count = 7;
+ bool true_scotsman = 8;
+ float score = 9;
+
+ repeated uint64 key = 5;
+ repeated int32 short_key = 19;
+ Nested nested = 6;
+ repeated Humour r_funny = 16;
+
+ map<string, Nested> terrain = 10;
+ testdata.SubDefaults proto2_field = 11;
+ map<string, testdata.SubDefaults> proto2_value = 13;
+
+ google.protobuf.Any anything = 14;
+ repeated google.protobuf.Any many_things = 15;
+
+ Message submessage = 17;
+ repeated Message children = 18;
+}
+
+message Nested {
+ string bunny = 1;
+ bool cute = 2;
+}
+
+message MessageWithMap {
+ map<bool, bytes> byte_mapping = 1;
+}
+
+message IntMap {
+ map<int32, int32> rtt = 1;
+}
+
+message IntMaps {
+ repeated IntMap maps = 1;
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_test.go b/vendor/src/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 00000000..735837f2
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/proto/proto3_proto"
+ tpb "github.com/golang/protobuf/proto/testdata"
+)
+
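+// In proto3, scalar fields at their zero value (and zero-length bytes) are
+// omitted from the wire format entirely, so each message below should
+// marshal to an empty encoding.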
+func TestProto3ZeroValues(t *testing.T) {
+ tests := []struct {
+ desc string
+ m proto.Message
+ }{
+ {"zero message", &pb.Message{}},
+ {"empty bytes field", &pb.Message{Data: []byte{}}},
+ }
+ for _, test := range tests {
+ b, err := proto.Marshal(test.m)
+ if err != nil {
+ t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+ continue
+ }
+ if len(b) > 0 {
+ t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+ }
+ }
+}
+
+func TestRoundTripProto3(t *testing.T) {
+ m := &pb.Message{
+ Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
+ Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
+ HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
+ Data: []byte("roboto"), // (2 | 4<<3): 0x22 0x06 "roboto"
+ ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
+ TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
+ Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
+
+ Key: []uint64{1, 0xdeadbeef},
+ Nested: &pb.Nested{
+ Bunny: "Monty",
+ },
+ }
+ t.Logf(" m: %v", m)
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ t.Logf(" b: %q", b)
+
+ m2 := new(pb.Message)
+ if err := proto.Unmarshal(b, m2); err != nil {
+ t.Fatalf("proto.Unmarshal: %v", err)
+ }
+ t.Logf("m2: %v", m2)
+
+ if !proto.Equal(m, m2) {
+ t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+ }
+}
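+
+// The tag bytes noted in the literal above follow the wire-format rule
+// tag = (fieldNumber << 3) | wireType: Name is field 1 with wire type 2
+// (length-delimited), giving 0x0a, and Data is field 4 with the same wire
+// type, giving 0x22.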
+
+func TestGettersForBasicTypesExist(t *testing.T) {
+ var m pb.Message
+ if got := m.GetNested().GetBunny(); got != "" {
+ t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got)
+ }
+ if got := m.GetNested().GetCute(); got {
+ t.Errorf("m.GetNested().GetCute() = %t, want false", got)
+ }
+}
+
+func TestProto3SetDefaults(t *testing.T) {
+ in := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: new(tpb.SubDefaults),
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": new(tpb.SubDefaults),
+ },
+ }
+
+ got := proto.Clone(in).(*pb.Message)
+ proto.SetDefaults(got)
+
+ // There are no defaults in proto3. Everything should be the zero value, but
+ // we need to remember to set defaults for nested proto2 messages.
+ want := &pb.Message{
+ Terrain: map[string]*pb.Nested{
+ "meadow": new(pb.Nested),
+ },
+ Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
+ Proto2Value: map[string]*tpb.SubDefaults{
+ "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
+ },
+ }
+
+ if !proto.Equal(got, want) {
+ t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/size2_test.go b/vendor/src/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 00000000..a2729c39
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "testing"
+)
+
+// This is a separate file and package from size_test.go because that one uses
+// generated messages and thus may not be in package proto without having a circular
+// dependency, whereas this file tests unexported details of size.go.
+
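+// A varint encodes 7 payload bits per byte, so values below 1<<7 take one
+// byte, values below 1<<14 take two, and so on, up to ten bytes once bit 63
+// of a uint64 is set.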
+func TestVarintSize(t *testing.T) {
+ // Check the edge cases carefully.
+ testCases := []struct {
+ n uint64
+ size int
+ }{
+ {0, 1},
+ {1, 1},
+ {127, 1},
+ {128, 2},
+ {16383, 2},
+ {16384, 3},
+ {1<<63 - 1, 9},
+ {1 << 63, 10},
+ }
+ for _, tc := range testCases {
+ size := sizeVarint(tc.n)
+ if size != tc.size {
+ t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
+ }
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/size_test.go b/vendor/src/github.com/golang/protobuf/proto/size_test.go
new file mode 100644
index 00000000..af1034dc
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/size_test.go
@@ -0,0 +1,164 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "log"
+ "strings"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
+
+// messageWithExtension2 is in equal_test.go.
+var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
+
+func init() {
+ if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+ if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
+ log.Panicf("SetExtension: %v", err)
+ }
+
+ // Force messageWithExtension3 to have the extension encoded.
+ Marshal(messageWithExtension3)
+
+}
+
+var SizeTests = []struct {
+ desc string
+ pb Message
+}{
+ {"empty", &pb.OtherMessage{}},
+ // Basic types.
+ {"bool", &pb.Defaults{F_Bool: Bool(true)}},
+ {"int32", &pb.Defaults{F_Int32: Int32(12)}},
+ {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
+ {"small int64", &pb.Defaults{F_Int64: Int64(1)}},
+ {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
+ {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
+ {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
+ {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
+ {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
+ {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
+ {"float", &pb.Defaults{F_Float: Float32(12.6)}},
+ {"double", &pb.Defaults{F_Double: Float64(13.9)}},
+ {"string", &pb.Defaults{F_String: String("niles")}},
+ {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
+ {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
+ {"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
+ {"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
+ {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
+ // Repeated.
+ {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
+ {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
+ {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
+ {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
+ {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
+ {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
+ // Need enough large numbers to verify that the header is counting the number of bytes
+ // for the field, not the number of elements.
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+ }}},
+ {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
+ {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
+ // Nested.
+ {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
+ {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
+ // Other things.
+ {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
+ {"extension (unencoded)", messageWithExtension1},
+ {"extension (encoded)", messageWithExtension3},
+ // proto3 message
+ {"proto3 empty", &proto3pb.Message{}},
+ {"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
+ {"proto3 int64", &proto3pb.Message{ResultCount: 1}},
+ {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
+ {"proto3 float", &proto3pb.Message{Score: 12.6}},
+ {"proto3 string", &proto3pb.Message{Name: "Snezana"}},
+ {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
+ {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
+ {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+ {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
+
+ {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
+ {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
+ {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
+ {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
+
+ {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
+ {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
+ {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
+
+ {"oneof not set", &pb.Oneof{}},
+ {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
+ {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
+ {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
+ {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
+ {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
+ {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
+ {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
+ {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
+ {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
+ {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
+ {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
+ {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
+ {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
+ {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
+ {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
+ {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
+ {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
+ {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
+ {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
+ {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
+}
+
+func TestSize(t *testing.T) {
+ for _, tc := range SizeTests {
+ size := Size(tc.pb)
+ b, err := Marshal(tc.pb)
+ if err != nil {
+ t.Errorf("%v: Marshal failed: %v", tc.desc, err)
+ continue
+ }
+ if size != len(b) {
+ t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
+ t.Logf("%v: bytes: %#v", tc.desc, b)
+ }
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
new file mode 100644
index 00000000..fc288628
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
@@ -0,0 +1,50 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include ../../Make.protobuf
+
+all: regenerate
+
+regenerate:
+ rm -f test.pb.go
+ make test.pb.go
+
+# The following rules are just aids to development. Not needed for typical testing.
+
+diff: regenerate
+ git diff test.pb.go
+
+restore:
+ cp test.pb.go.golden test.pb.go
+
+preserve:
+ cp test.pb.go test.pb.go.golden
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
new file mode 100644
index 00000000..7172d0e9
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
@@ -0,0 +1,86 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verify that the compiler output for test.proto is unchanged.
+
+package testdata
+
+import (
+ "crypto/sha1"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
+func sum(t *testing.T, name string) string {
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("sum(%q): length is %d", name, len(data))
+ hash := sha1.New()
+ _, err = hash.Write(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return fmt.Sprintf("% x", hash.Sum(nil))
+}
+
+func run(t *testing.T, name string, args ...string) {
+ cmd := exec.Command(name, args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGolden(t *testing.T) {
+ // Compute the original checksum.
+ goldenSum := sum(t, "test.pb.go")
+ // Run the proto compiler.
+ run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
+ newFile := filepath.Join(os.TempDir(), "test.pb.go")
+ defer os.Remove(newFile)
+ // Compute the new checksum.
+ newSum := sum(t, newFile)
+ // Verify
+ if newSum != goldenSum {
+ run(t, "diff", "-u", "test.pb.go", newFile)
+ t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
new file mode 100644
index 00000000..25ffd7a0
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
@@ -0,0 +1,4148 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+/*
+Package testdata is a generated protocol buffer package.
+
+It is generated from these files:
+ test.proto
+
+It has these top-level messages:
+ GoEnum
+ GoTestField
+ GoTest
+ GoTestRequiredGroupField
+ GoSkipTest
+ NonPackedTest
+ PackedTest
+ MaxTag
+ OldMessage
+ NewMessage
+ InnerMessage
+ OtherMessage
+ RequiredInnerMessage
+ MyMessage
+ Ext
+ ComplexExtension
+ DefaultsMessage
+ MyMessageSet
+ Empty
+ MessageList
+ Strings
+ Defaults
+ SubDefaults
+ RepeatedEnum
+ MoreRepeated
+ GroupOld
+ GroupNew
+ FloatingPoint
+ MessageWithMap
+ Oneof
+ Communique
+*/
+package testdata
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type FOO int32
+
+const (
+ FOO_FOO1 FOO = 1
+)
+
+var FOO_name = map[int32]string{
+ 1: "FOO1",
+}
+var FOO_value = map[string]int32{
+ "FOO1": 1,
+}
+
+func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+}
+func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+}
+func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+}
+func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+// An enum, for completeness.
+type GoTest_KIND int32
+
+const (
+ GoTest_VOID GoTest_KIND = 0
+ // Basic types
+ GoTest_BOOL GoTest_KIND = 1
+ GoTest_BYTES GoTest_KIND = 2
+ GoTest_FINGERPRINT GoTest_KIND = 3
+ GoTest_FLOAT GoTest_KIND = 4
+ GoTest_INT GoTest_KIND = 5
+ GoTest_STRING GoTest_KIND = 6
+ GoTest_TIME GoTest_KIND = 7
+ // Groupings
+ GoTest_TUPLE GoTest_KIND = 8
+ GoTest_ARRAY GoTest_KIND = 9
+ GoTest_MAP GoTest_KIND = 10
+ // Table types
+ GoTest_TABLE GoTest_KIND = 11
+ // Functions
+ GoTest_FUNCTION GoTest_KIND = 12
+)
+
+var GoTest_KIND_name = map[int32]string{
+ 0: "VOID",
+ 1: "BOOL",
+ 2: "BYTES",
+ 3: "FINGERPRINT",
+ 4: "FLOAT",
+ 5: "INT",
+ 6: "STRING",
+ 7: "TIME",
+ 8: "TUPLE",
+ 9: "ARRAY",
+ 10: "MAP",
+ 11: "TABLE",
+ 12: "FUNCTION",
+}
+var GoTest_KIND_value = map[string]int32{
+ "VOID": 0,
+ "BOOL": 1,
+ "BYTES": 2,
+ "FINGERPRINT": 3,
+ "FLOAT": 4,
+ "INT": 5,
+ "STRING": 6,
+ "TIME": 7,
+ "TUPLE": 8,
+ "ARRAY": 9,
+ "MAP": 10,
+ "TABLE": 11,
+ "FUNCTION": 12,
+}
+
+func (x GoTest_KIND) Enum() *GoTest_KIND {
+ p := new(GoTest_KIND)
+ *p = x
+ return p
+}
+func (x GoTest_KIND) String() string {
+ return proto.EnumName(GoTest_KIND_name, int32(x))
+}
+func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
+ if err != nil {
+ return err
+ }
+ *x = GoTest_KIND(value)
+ return nil
+}
+func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+type MyMessage_Color int32
+
+const (
+ MyMessage_RED MyMessage_Color = 0
+ MyMessage_GREEN MyMessage_Color = 1
+ MyMessage_BLUE MyMessage_Color = 2
+)
+
+var MyMessage_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var MyMessage_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x MyMessage_Color) Enum() *MyMessage_Color {
+ p := new(MyMessage_Color)
+ *p = x
+ return p
+}
+func (x MyMessage_Color) String() string {
+ return proto.EnumName(MyMessage_Color_name, int32(x))
+}
+func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
+ if err != nil {
+ return err
+ }
+ *x = MyMessage_Color(value)
+ return nil
+}
+func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+type DefaultsMessage_DefaultsEnum int32
+
+const (
+ DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0
+ DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1
+ DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2
+)
+
+var DefaultsMessage_DefaultsEnum_name = map[int32]string{
+ 0: "ZERO",
+ 1: "ONE",
+ 2: "TWO",
+}
+var DefaultsMessage_DefaultsEnum_value = map[string]int32{
+ "ZERO": 0,
+ "ONE": 1,
+ "TWO": 2,
+}
+
+func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum {
+ p := new(DefaultsMessage_DefaultsEnum)
+ *p = x
+ return p
+}
+func (x DefaultsMessage_DefaultsEnum) String() string {
+ return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x))
+}
+func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum")
+ if err != nil {
+ return err
+ }
+ *x = DefaultsMessage_DefaultsEnum(value)
+ return nil
+}
+func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{16, 0}
+}
+
+type Defaults_Color int32
+
+const (
+ Defaults_RED Defaults_Color = 0
+ Defaults_GREEN Defaults_Color = 1
+ Defaults_BLUE Defaults_Color = 2
+)
+
+var Defaults_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Defaults_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Defaults_Color) Enum() *Defaults_Color {
+ p := new(Defaults_Color)
+ *p = x
+ return p
+}
+func (x Defaults_Color) String() string {
+ return proto.EnumName(Defaults_Color_name, int32(x))
+}
+func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
+ if err != nil {
+ return err
+ }
+ *x = Defaults_Color(value)
+ return nil
+}
+func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} }
+
+type RepeatedEnum_Color int32
+
+const (
+ RepeatedEnum_RED RepeatedEnum_Color = 1
+)
+
+var RepeatedEnum_Color_name = map[int32]string{
+ 1: "RED",
+}
+var RepeatedEnum_Color_value = map[string]int32{
+ "RED": 1,
+}
+
+func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
+ p := new(RepeatedEnum_Color)
+ *p = x
+ return p
+}
+func (x RepeatedEnum_Color) String() string {
+ return proto.EnumName(RepeatedEnum_Color_name, int32(x))
+}
+func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
+ if err != nil {
+ return err
+ }
+ *x = RepeatedEnum_Color(value)
+ return nil
+}
+func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} }
+
+type GoEnum struct {
+ Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoEnum) Reset() { *m = GoEnum{} }
+func (m *GoEnum) String() string { return proto.CompactTextString(m) }
+func (*GoEnum) ProtoMessage() {}
+func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *GoEnum) GetFoo() FOO {
+ if m != nil && m.Foo != nil {
+ return *m.Foo
+ }
+ return FOO_FOO1
+}
+
+type GoTestField struct {
+ Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"`
+ Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestField) Reset() { *m = GoTestField{} }
+func (m *GoTestField) String() string { return proto.CompactTextString(m) }
+func (*GoTestField) ProtoMessage() {}
+func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *GoTestField) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+}
+
+func (m *GoTestField) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+type GoTest struct {
+ // Some typical parameters
+ Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
+ Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"`
+ Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"`
+ // Required, repeated and optional foreign fields.
+ RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"`
+ RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"`
+ OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"`
+ // Required fields of all basic types
+ F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"`
+ F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"`
+ F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"`
+ F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"`
+ F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"`
+ F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"`
+ F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"`
+ F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"`
+ F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"`
+ F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"`
+ F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"`
+ F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"`
+ F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"`
+ // Repeated fields of all basic types
+ F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"`
+ F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"`
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"`
+ F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"`
+ F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"`
+ F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"`
+ F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"`
+ F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"`
+ F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"`
+ F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"`
+ F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"`
+ F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"`
+ F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"`
+ // Optional fields of all basic types
+ F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"`
+ F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"`
+ F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"`
+ F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"`
+ F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"`
+ F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"`
+ F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"`
+ F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"`
+ F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"`
+ F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"`
+ F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"`
+ F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"`
+ F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"`
+ // Default-valued fields of all basic types
+ F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"`
+ F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
+ F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
+ F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
+ F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
+ F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
+ F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
+ F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"`
+ F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"`
+ F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
+ F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
+ F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
+ F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
+ // Packed repeated fields (no string or bytes).
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"`
+ F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"`
+ F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"`
+ F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"`
+ F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"`
+ F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"`
+ F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"`
+ F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"`
+ F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"`
+ F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"`
+ F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"`
+ Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"`
+ Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"`
+ Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest) Reset() { *m = GoTest{} }
+func (m *GoTest) String() string { return proto.CompactTextString(m) }
+func (*GoTest) ProtoMessage() {}
+func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+const Default_GoTest_F_BoolDefaulted bool = true
+const Default_GoTest_F_Int32Defaulted int32 = 32
+const Default_GoTest_F_Int64Defaulted int64 = 64
+const Default_GoTest_F_Fixed32Defaulted uint32 = 320
+const Default_GoTest_F_Fixed64Defaulted uint64 = 640
+const Default_GoTest_F_Uint32Defaulted uint32 = 3200
+const Default_GoTest_F_Uint64Defaulted uint64 = 6400
+const Default_GoTest_F_FloatDefaulted float32 = 314159
+const Default_GoTest_F_DoubleDefaulted float64 = 271828
+const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
+
+var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
+
+const Default_GoTest_F_Sint32Defaulted int32 = -32
+const Default_GoTest_F_Sint64Defaulted int64 = -64
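+
+// The generated getters for these defaulted fields (later in this file)
+// return the constants above when the field pointer is nil, so, for example,
+// new(GoTest).GetF_Int32Defaulted() yields 32 rather than 0.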
+
+func (m *GoTest) GetKind() GoTest_KIND {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return GoTest_VOID
+}
+
+func (m *GoTest) GetTable() string {
+ if m != nil && m.Table != nil {
+ return *m.Table
+ }
+ return ""
+}
+
+func (m *GoTest) GetParam() int32 {
+ if m != nil && m.Param != nil {
+ return *m.Param
+ }
+ return 0
+}
+
+func (m *GoTest) GetRequiredField() *GoTestField {
+ if m != nil {
+ return m.RequiredField
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedField() []*GoTestField {
+ if m != nil {
+ return m.RepeatedField
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalField() *GoTestField {
+ if m != nil {
+ return m.OptionalField
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolRequired() bool {
+ if m != nil && m.F_BoolRequired != nil {
+ return *m.F_BoolRequired
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Required() int32 {
+ if m != nil && m.F_Int32Required != nil {
+ return *m.F_Int32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Required() int64 {
+ if m != nil && m.F_Int64Required != nil {
+ return *m.F_Int64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Required() uint32 {
+ if m != nil && m.F_Fixed32Required != nil {
+ return *m.F_Fixed32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Required() uint64 {
+ if m != nil && m.F_Fixed64Required != nil {
+ return *m.F_Fixed64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Required() uint32 {
+ if m != nil && m.F_Uint32Required != nil {
+ return *m.F_Uint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Required() uint64 {
+ if m != nil && m.F_Uint64Required != nil {
+ return *m.F_Uint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatRequired() float32 {
+ if m != nil && m.F_FloatRequired != nil {
+ return *m.F_FloatRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleRequired() float64 {
+ if m != nil && m.F_DoubleRequired != nil {
+ return *m.F_DoubleRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringRequired() string {
+ if m != nil && m.F_StringRequired != nil {
+ return *m.F_StringRequired
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesRequired() []byte {
+ if m != nil {
+ return m.F_BytesRequired
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Required() int32 {
+ if m != nil && m.F_Sint32Required != nil {
+ return *m.F_Sint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Required() int64 {
+ if m != nil && m.F_Sint64Required != nil {
+ return *m.F_Sint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolRepeated() []bool {
+ if m != nil {
+ return m.F_BoolRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32Repeated() []int32 {
+ if m != nil {
+ return m.F_Int32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64Repeated() []int64 {
+ if m != nil {
+ return m.F_Int64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Fixed32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Fixed64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Uint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Uint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeated() []float32 {
+ if m != nil {
+ return m.F_FloatRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeated() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_StringRepeated() []string {
+ if m != nil {
+ return m.F_StringRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BytesRepeated() [][]byte {
+ if m != nil {
+ return m.F_BytesRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Repeated() []int32 {
+ if m != nil {
+ return m.F_Sint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64Repeated() []int64 {
+ if m != nil {
+ return m.F_Sint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolOptional() bool {
+ if m != nil && m.F_BoolOptional != nil {
+ return *m.F_BoolOptional
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Optional() int32 {
+ if m != nil && m.F_Int32Optional != nil {
+ return *m.F_Int32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Optional() int64 {
+ if m != nil && m.F_Int64Optional != nil {
+ return *m.F_Int64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Optional() uint32 {
+ if m != nil && m.F_Fixed32Optional != nil {
+ return *m.F_Fixed32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Optional() uint64 {
+ if m != nil && m.F_Fixed64Optional != nil {
+ return *m.F_Fixed64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Optional() uint32 {
+ if m != nil && m.F_Uint32Optional != nil {
+ return *m.F_Uint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Optional() uint64 {
+ if m != nil && m.F_Uint64Optional != nil {
+ return *m.F_Uint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatOptional() float32 {
+ if m != nil && m.F_FloatOptional != nil {
+ return *m.F_FloatOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleOptional() float64 {
+ if m != nil && m.F_DoubleOptional != nil {
+ return *m.F_DoubleOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringOptional() string {
+ if m != nil && m.F_StringOptional != nil {
+ return *m.F_StringOptional
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesOptional() []byte {
+ if m != nil {
+ return m.F_BytesOptional
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Optional() int32 {
+ if m != nil && m.F_Sint32Optional != nil {
+ return *m.F_Sint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Optional() int64 {
+ if m != nil && m.F_Sint64Optional != nil {
+ return *m.F_Sint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolDefaulted() bool {
+ if m != nil && m.F_BoolDefaulted != nil {
+ return *m.F_BoolDefaulted
+ }
+ return Default_GoTest_F_BoolDefaulted
+}
+
+func (m *GoTest) GetF_Int32Defaulted() int32 {
+ if m != nil && m.F_Int32Defaulted != nil {
+ return *m.F_Int32Defaulted
+ }
+ return Default_GoTest_F_Int32Defaulted
+}
+
+func (m *GoTest) GetF_Int64Defaulted() int64 {
+ if m != nil && m.F_Int64Defaulted != nil {
+ return *m.F_Int64Defaulted
+ }
+ return Default_GoTest_F_Int64Defaulted
+}
+
+func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
+ if m != nil && m.F_Fixed32Defaulted != nil {
+ return *m.F_Fixed32Defaulted
+ }
+ return Default_GoTest_F_Fixed32Defaulted
+}
+
+func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
+ if m != nil && m.F_Fixed64Defaulted != nil {
+ return *m.F_Fixed64Defaulted
+ }
+ return Default_GoTest_F_Fixed64Defaulted
+}
+
+func (m *GoTest) GetF_Uint32Defaulted() uint32 {
+ if m != nil && m.F_Uint32Defaulted != nil {
+ return *m.F_Uint32Defaulted
+ }
+ return Default_GoTest_F_Uint32Defaulted
+}
+
+func (m *GoTest) GetF_Uint64Defaulted() uint64 {
+ if m != nil && m.F_Uint64Defaulted != nil {
+ return *m.F_Uint64Defaulted
+ }
+ return Default_GoTest_F_Uint64Defaulted
+}
+
+func (m *GoTest) GetF_FloatDefaulted() float32 {
+ if m != nil && m.F_FloatDefaulted != nil {
+ return *m.F_FloatDefaulted
+ }
+ return Default_GoTest_F_FloatDefaulted
+}
+
+func (m *GoTest) GetF_DoubleDefaulted() float64 {
+ if m != nil && m.F_DoubleDefaulted != nil {
+ return *m.F_DoubleDefaulted
+ }
+ return Default_GoTest_F_DoubleDefaulted
+}
+
+func (m *GoTest) GetF_StringDefaulted() string {
+ if m != nil && m.F_StringDefaulted != nil {
+ return *m.F_StringDefaulted
+ }
+ return Default_GoTest_F_StringDefaulted
+}
+
+func (m *GoTest) GetF_BytesDefaulted() []byte {
+ if m != nil && m.F_BytesDefaulted != nil {
+ return m.F_BytesDefaulted
+ }
+ return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
+}
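
GetF_BytesDefaulted is the one defaulted getter that cannot simply return its constant: handing back Default_GoTest_F_BytesDefaulted directly would let a caller mutate the shared slice, so the generated code returns a fresh copy via append. A sketch of why that matters (hypothetical, not part of the generated file):

func defaultBytesIsCopied() {
	m := new(GoTest) // F_BytesDefaulted left unset
	buf := m.GetF_BytesDefaulted()
	buf[0] = 'X' // mutates only this copy; Default_GoTest_F_BytesDefaulted still reads "Bignose"
}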
+
+func (m *GoTest) GetF_Sint32Defaulted() int32 {
+ if m != nil && m.F_Sint32Defaulted != nil {
+ return *m.F_Sint32Defaulted
+ }
+ return Default_GoTest_F_Sint32Defaulted
+}
+
+func (m *GoTest) GetF_Sint64Defaulted() int64 {
+ if m != nil && m.F_Sint64Defaulted != nil {
+ return *m.F_Sint64Defaulted
+ }
+ return Default_GoTest_F_Sint64Defaulted
+}
+
+func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
+ if m != nil {
+ return m.F_BoolRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Int32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Int64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Fixed32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Fixed64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Uint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Uint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
+ if m != nil {
+ return m.F_FloatRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Sint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Sint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
+ if m != nil {
+ return m.Requiredgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
+ if m != nil {
+ return m.Repeatedgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+}
+
+// Required, repeated, and optional groups.
+type GoTest_RequiredGroup struct {
+ RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
+func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RequiredGroup) ProtoMessage() {}
+func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+
+func (m *GoTest_RequiredGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_RepeatedGroup struct {
+ RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
+func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RepeatedGroup) ProtoMessage() {}
+func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
+
+func (m *GoTest_RepeatedGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+type GoTest_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
+func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_OptionalGroup) ProtoMessage() {}
+func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} }
+
+func (m *GoTest_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// For testing a group containing a required field.
+type GoTestRequiredGroupField struct {
+ Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} }
+func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) }
+func (*GoTestRequiredGroupField) ProtoMessage() {}
+func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group {
+ if m != nil {
+ return m.Group
+ }
+ return nil
+}
+
+type GoTestRequiredGroupField_Group struct {
+ Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} }
+func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) }
+func (*GoTestRequiredGroupField_Group) ProtoMessage() {}
+func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{3, 0}
+}
+
+func (m *GoTestRequiredGroupField_Group) GetField() int32 {
+ if m != nil && m.Field != nil {
+ return *m.Field
+ }
+ return 0
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+type GoSkipTest struct {
+ SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"`
+ SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"`
+ SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"`
+ SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"`
+ Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
+func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest) ProtoMessage() {}
+func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *GoSkipTest) GetSkipInt32() int32 {
+ if m != nil && m.SkipInt32 != nil {
+ return *m.SkipInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed32() uint32 {
+ if m != nil && m.SkipFixed32 != nil {
+ return *m.SkipFixed32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed64() uint64 {
+ if m != nil && m.SkipFixed64 != nil {
+ return *m.SkipFixed64
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipString() string {
+ if m != nil && m.SkipString != nil {
+ return *m.SkipString
+ }
+ return ""
+}
+
+func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
+ if m != nil {
+ return m.Skipgroup
+ }
+ return nil
+}
+
+type GoSkipTest_SkipGroup struct {
+ GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"`
+ GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
+func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest_SkipGroup) ProtoMessage() {}
+func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
+
+func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
+ if m != nil && m.GroupInt32 != nil {
+ return *m.GroupInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest_SkipGroup) GetGroupString() string {
+ if m != nil && m.GroupString != nil {
+ return *m.GroupString
+ }
+ return ""
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+type NonPackedTest struct {
+ A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
+func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
+func (*NonPackedTest) ProtoMessage() {}
+func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *NonPackedTest) GetA() []int32 {
+ if m != nil {
+ return m.A
+ }
+ return nil
+}
+
+type PackedTest struct {
+ B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PackedTest) Reset() { *m = PackedTest{} }
+func (m *PackedTest) String() string { return proto.CompactTextString(m) }
+func (*PackedTest) ProtoMessage() {}
+func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *PackedTest) GetB() []int32 {
+ if m != nil {
+ return m.B
+ }
+ return nil
+}
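
The comment above NonPackedTest and PackedTest states the invariant these two types exercise: field 1 carries the same varint data in unpacked and packed form, and a decoder must accept either encoding. A minimal round-trip sketch, assuming it sits beside this generated package:

func packedSwitch() (*PackedTest, error) {
	np := &NonPackedTest{A: []int32{1, 2, 3}} // encodes field 1 unpacked
	buf, err := proto.Marshal(np)
	if err != nil {
		return nil, err
	}
	p := new(PackedTest)
	// The decoder accepts the unpacked bytes for the packed field, and vice versa.
	if err := proto.Unmarshal(buf, p); err != nil {
		return nil, err
	}
	return p, nil // p.GetB() == []int32{1, 2, 3}
}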
+
+type MaxTag struct {
+ // Maximum possible tag number.
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MaxTag) Reset() { *m = MaxTag{} }
+func (m *MaxTag) String() string { return proto.CompactTextString(m) }
+func (*MaxTag) ProtoMessage() {}
+func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *MaxTag) GetLastField() string {
+ if m != nil && m.LastField != nil {
+ return *m.LastField
+ }
+ return ""
+}
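
The 536870911 in LastField's tag is the protobuf ceiling rather than an arbitrary number: a wire-format key is varint(fieldNumber<<3 | wireType), leaving 29 bits for the field number. A one-line check (hypothetical):

const maxFieldNumber = 1<<29 - 1 // == 536870911, the largest legal tag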
+
+type OldMessage struct {
+ Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage) Reset() { *m = OldMessage{} }
+func (m *OldMessage) String() string { return proto.CompactTextString(m) }
+func (*OldMessage) ProtoMessage() {}
+func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *OldMessage) GetNested() *OldMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *OldMessage) GetNum() int32 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type OldMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
+func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*OldMessage_Nested) ProtoMessage() {}
+func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
+
+func (m *OldMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+type NewMessage struct {
+ Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ // This is an int32 in OldMessage.
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage) Reset() { *m = NewMessage{} }
+func (m *NewMessage) String() string { return proto.CompactTextString(m) }
+func (*NewMessage) ProtoMessage() {}
+func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *NewMessage) GetNested() *NewMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+func (m *NewMessage) GetNum() int64 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+type NewMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
+func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*NewMessage_Nested) ProtoMessage() {}
+func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+
+func (m *NewMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *NewMessage_Nested) GetFoodGroup() string {
+ if m != nil && m.FoodGroup != nil {
+ return *m.FoodGroup
+ }
+ return ""
+}
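
The wire compatibility promised in NewMessage's comment is directly checkable: field 2 is a varint in both versions, so bytes produced by one decode as the other. A hedged round-trip sketch alongside this package:

func oldReadsNew() (*OldMessage, error) {
	nm := &NewMessage{
		Nested: &NewMessage_Nested{Name: proto.String("n")},
		Num:    proto.Int64(42), // int64 here, int32 in OldMessage; same varint wire type
	}
	buf, err := proto.Marshal(nm)
	if err != nil {
		return nil, err
	}
	om := new(OldMessage)
	if err := proto.Unmarshal(buf, om); err != nil {
		return nil, err
	}
	return om, nil // om.GetNum() == 42, om.GetNested().GetName() == "n"
}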
+
+type InnerMessage struct {
+ Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
+ Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InnerMessage) Reset() { *m = InnerMessage{} }
+func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
+func (*InnerMessage) ProtoMessage() {}
+func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+const Default_InnerMessage_Port int32 = 4000
+
+func (m *InnerMessage) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *InnerMessage) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return Default_InnerMessage_Port
+}
+
+func (m *InnerMessage) GetConnected() bool {
+ if m != nil && m.Connected != nil {
+ return *m.Connected
+ }
+ return false
+}
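
Port shows the def= machinery end to end: the struct tag records def=4000, the constant above mirrors it, and the getter falls back to that constant only while the field pointer is nil. A short illustrative sketch (hypothetical):

func portDefault() (int32, int32) {
	m := &InnerMessage{Host: proto.String("localhost")}
	unset := m.GetPort() // 4000: nil pointer falls back to Default_InnerMessage_Port
	m.Port = proto.Int32(8080)
	set := m.GetPort() // 8080: an explicit value always wins
	return unset, set
}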
+
+type OtherMessage struct {
+ Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherMessage) Reset() { *m = OtherMessage{} }
+func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
+func (*OtherMessage) ProtoMessage() {}
+func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_OtherMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OtherMessage
+}
+
+func (m *OtherMessage) GetKey() int64 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *OtherMessage) GetWeight() float32 {
+ if m != nil && m.Weight != nil {
+ return *m.Weight
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+type RequiredInnerMessage struct {
+ LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} }
+func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) }
+func (*RequiredInnerMessage) ProtoMessage() {}
+func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage {
+ if m != nil {
+ return m.LeoFinallyWonAnOscar
+ }
+ return nil
+}
+
+type MyMessage struct {
+ Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
+ Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
+ Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
+ Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
+ WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"`
+ RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"`
+ Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
+ Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
+ // This field becomes [][]byte in the generated code.
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"`
+ Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage) Reset() { *m = MyMessage{} }
+func (m *MyMessage) String() string { return proto.CompactTextString(m) }
+func (*MyMessage) ProtoMessage() {}
+func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+var extRange_MyMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessage
+}
+
+func (m *MyMessage) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *MyMessage) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MyMessage) GetQuote() string {
+ if m != nil && m.Quote != nil {
+ return *m.Quote
+ }
+ return ""
+}
+
+func (m *MyMessage) GetPet() []string {
+ if m != nil {
+ return m.Pet
+ }
+ return nil
+}
+
+func (m *MyMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetOthers() []*OtherMessage {
+ if m != nil {
+ return m.Others
+ }
+ return nil
+}
+
+func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage {
+ if m != nil {
+ return m.WeMustGoDeeper
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepInner() []*InnerMessage {
+ if m != nil {
+ return m.RepInner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBikeshed() MyMessage_Color {
+ if m != nil && m.Bikeshed != nil {
+ return *m.Bikeshed
+ }
+ return MyMessage_RED
+}
+
+func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepBytes() [][]byte {
+ if m != nil {
+ return m.RepBytes
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBigfloat() float64 {
+ if m != nil && m.Bigfloat != nil {
+ return *m.Bigfloat
+ }
+ return 0
+}
+
+type MyMessage_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
+func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*MyMessage_SomeGroup) ProtoMessage() {}
+func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
+
+func (m *MyMessage_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+type Ext struct {
+ Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Ext) Reset() { *m = Ext{} }
+func (m *Ext) String() string { return proto.CompactTextString(m) }
+func (*Ext) ProtoMessage() {}
+func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *Ext) GetData() string {
+ if m != nil && m.Data != nil {
+ return *m.Data
+ }
+ return ""
+}
+
+var E_Ext_More = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*Ext)(nil),
+ Field: 103,
+ Name: "testdata.Ext.more",
+ Tag: "bytes,103,opt,name=more",
+ Filename: "test.proto",
+}
+
+var E_Ext_Text = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 104,
+ Name: "testdata.Ext.text",
+ Tag: "bytes,104,opt,name=text",
+ Filename: "test.proto",
+}
+
+var E_Ext_Number = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 105,
+ Name: "testdata.Ext.number",
+ Tag: "varint,105,opt,name=number",
+ Filename: "test.proto",
+}
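
The three descriptors above declare fields 103-105 inside MyMessage's extension range {100, 536870911}. Attaching and reading one goes through the proto package's SetExtension/GetExtension; a sketch, assuming a MyMessage with its required Count set:

func extensionRoundTrip() (*Ext, error) {
	m := &MyMessage{Count: proto.Int32(1)}
	if err := proto.SetExtension(m, E_Ext_More, &Ext{Data: proto.String("abc")}); err != nil {
		return nil, err
	}
	v, err := proto.GetExtension(m, E_Ext_More) // returns interface{}
	if err != nil {
		return nil, err
	}
	return v.(*Ext), nil // assert back to the declared ExtensionType
}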
+
+type ComplexExtension struct {
+ First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"`
+ Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"`
+ Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ComplexExtension) Reset() { *m = ComplexExtension{} }
+func (m *ComplexExtension) String() string { return proto.CompactTextString(m) }
+func (*ComplexExtension) ProtoMessage() {}
+func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *ComplexExtension) GetFirst() int32 {
+ if m != nil && m.First != nil {
+ return *m.First
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetSecond() int32 {
+ if m != nil && m.Second != nil {
+ return *m.Second
+ }
+ return 0
+}
+
+func (m *ComplexExtension) GetThird() []int32 {
+ if m != nil {
+ return m.Third
+ }
+ return nil
+}
+
+type DefaultsMessage struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} }
+func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) }
+func (*DefaultsMessage) ProtoMessage() {}
+func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_DefaultsMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_DefaultsMessage
+}
+
+type MyMessageSet struct {
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
+func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
+func (*MyMessageSet) ProtoMessage() {}
+func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *MyMessageSet) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
+}
+func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
+}
+
+// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*MyMessageSet)(nil)
+var _ proto.Unmarshaler = (*MyMessageSet)(nil)
+
+var extRange_MyMessageSet = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessageSet
+}
+
+type Empty struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+type MessageList struct {
+ Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList) Reset() { *m = MessageList{} }
+func (m *MessageList) String() string { return proto.CompactTextString(m) }
+func (*MessageList) ProtoMessage() {}
+func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *MessageList) GetMessage() []*MessageList_Message {
+ if m != nil {
+ return m.Message
+ }
+ return nil
+}
+
+type MessageList_Message struct {
+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
+func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
+func (*MessageList_Message) ProtoMessage() {}
+func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
+
+func (m *MessageList_Message) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MessageList_Message) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+type Strings struct {
+ StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"`
+ BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Strings) Reset() { *m = Strings{} }
+func (m *Strings) String() string { return proto.CompactTextString(m) }
+func (*Strings) ProtoMessage() {}
+func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *Strings) GetStringField() string {
+ if m != nil && m.StringField != nil {
+ return *m.StringField
+ }
+ return ""
+}
+
+func (m *Strings) GetBytesField() []byte {
+ if m != nil {
+ return m.BytesField
+ }
+ return nil
+}
+
+type Defaults struct {
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"`
+ F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"`
+ F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"`
+ F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"`
+ F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"`
+ F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"`
+ F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"`
+ F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"`
+ F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"`
+ F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"`
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"`
+ F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"`
+ F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"`
+ F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
+ // More fields with crazy defaults.
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"`
+ F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"`
+ F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"`
+ // Sub-message.
+ Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
+ // Redundant but explicit defaults.
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Defaults) Reset() { *m = Defaults{} }
+func (m *Defaults) String() string { return proto.CompactTextString(m) }
+func (*Defaults) ProtoMessage() {}
+func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+const Default_Defaults_F_Bool bool = true
+const Default_Defaults_F_Int32 int32 = 32
+const Default_Defaults_F_Int64 int64 = 64
+const Default_Defaults_F_Fixed32 uint32 = 320
+const Default_Defaults_F_Fixed64 uint64 = 640
+const Default_Defaults_F_Uint32 uint32 = 3200
+const Default_Defaults_F_Uint64 uint64 = 6400
+const Default_Defaults_F_Float float32 = 314159
+const Default_Defaults_F_Double float64 = 271828
+const Default_Defaults_F_String string = "hello, \"world!\"\n"
+
+var Default_Defaults_F_Bytes []byte = []byte("Bignose")
+
+const Default_Defaults_F_Sint32 int32 = -32
+const Default_Defaults_F_Sint64 int64 = -64
+const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
+
+var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
+var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
+var Default_Defaults_F_Nan float32 = float32(math.NaN())
+
+func (m *Defaults) GetF_Bool() bool {
+ if m != nil && m.F_Bool != nil {
+ return *m.F_Bool
+ }
+ return Default_Defaults_F_Bool
+}
+
+func (m *Defaults) GetF_Int32() int32 {
+ if m != nil && m.F_Int32 != nil {
+ return *m.F_Int32
+ }
+ return Default_Defaults_F_Int32
+}
+
+func (m *Defaults) GetF_Int64() int64 {
+ if m != nil && m.F_Int64 != nil {
+ return *m.F_Int64
+ }
+ return Default_Defaults_F_Int64
+}
+
+func (m *Defaults) GetF_Fixed32() uint32 {
+ if m != nil && m.F_Fixed32 != nil {
+ return *m.F_Fixed32
+ }
+ return Default_Defaults_F_Fixed32
+}
+
+func (m *Defaults) GetF_Fixed64() uint64 {
+ if m != nil && m.F_Fixed64 != nil {
+ return *m.F_Fixed64
+ }
+ return Default_Defaults_F_Fixed64
+}
+
+func (m *Defaults) GetF_Uint32() uint32 {
+ if m != nil && m.F_Uint32 != nil {
+ return *m.F_Uint32
+ }
+ return Default_Defaults_F_Uint32
+}
+
+func (m *Defaults) GetF_Uint64() uint64 {
+ if m != nil && m.F_Uint64 != nil {
+ return *m.F_Uint64
+ }
+ return Default_Defaults_F_Uint64
+}
+
+func (m *Defaults) GetF_Float() float32 {
+ if m != nil && m.F_Float != nil {
+ return *m.F_Float
+ }
+ return Default_Defaults_F_Float
+}
+
+func (m *Defaults) GetF_Double() float64 {
+ if m != nil && m.F_Double != nil {
+ return *m.F_Double
+ }
+ return Default_Defaults_F_Double
+}
+
+func (m *Defaults) GetF_String() string {
+ if m != nil && m.F_String != nil {
+ return *m.F_String
+ }
+ return Default_Defaults_F_String
+}
+
+func (m *Defaults) GetF_Bytes() []byte {
+ if m != nil && m.F_Bytes != nil {
+ return m.F_Bytes
+ }
+ return append([]byte(nil), Default_Defaults_F_Bytes...)
+}
+
+func (m *Defaults) GetF_Sint32() int32 {
+ if m != nil && m.F_Sint32 != nil {
+ return *m.F_Sint32
+ }
+ return Default_Defaults_F_Sint32
+}
+
+func (m *Defaults) GetF_Sint64() int64 {
+ if m != nil && m.F_Sint64 != nil {
+ return *m.F_Sint64
+ }
+ return Default_Defaults_F_Sint64
+}
+
+func (m *Defaults) GetF_Enum() Defaults_Color {
+ if m != nil && m.F_Enum != nil {
+ return *m.F_Enum
+ }
+ return Default_Defaults_F_Enum
+}
+
+func (m *Defaults) GetF_Pinf() float32 {
+ if m != nil && m.F_Pinf != nil {
+ return *m.F_Pinf
+ }
+ return Default_Defaults_F_Pinf
+}
+
+func (m *Defaults) GetF_Ninf() float32 {
+ if m != nil && m.F_Ninf != nil {
+ return *m.F_Ninf
+ }
+ return Default_Defaults_F_Ninf
+}
+
+func (m *Defaults) GetF_Nan() float32 {
+ if m != nil && m.F_Nan != nil {
+ return *m.F_Nan
+ }
+ return Default_Defaults_F_Nan
+}
+
+func (m *Defaults) GetSub() *SubDefaults {
+ if m != nil {
+ return m.Sub
+ }
+ return nil
+}
+
+func (m *Defaults) GetStrZero() string {
+ if m != nil && m.StrZero != nil {
+ return *m.StrZero
+ }
+ return ""
+}
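
Beyond the per-field getters, the proto package can materialize every def= value at once: proto.SetDefaults walks the message and fills each nil scalar pointer with its declared default, including the inf/-inf/NaN floats above. A minimal sketch (hypothetical):

func materializedDefaults() *Defaults {
	d := new(Defaults)
	proto.SetDefaults(d)
	// Now *d.F_Int32 == 32, *d.F_String == "hello, \"world!\"\n",
	// and math.IsInf(float64(*d.F_Pinf), 1) holds.
	return d
}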
+
+type SubDefaults struct {
+ N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SubDefaults) Reset() { *m = SubDefaults{} }
+func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
+func (*SubDefaults) ProtoMessage() {}
+func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+const Default_SubDefaults_N int64 = 7
+
+func (m *SubDefaults) GetN() int64 {
+ if m != nil && m.N != nil {
+ return *m.N
+ }
+ return Default_SubDefaults_N
+}
+
+type RepeatedEnum struct {
+ Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
+func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
+func (*RepeatedEnum) ProtoMessage() {}
+func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
+ if m != nil {
+ return m.Color
+ }
+ return nil
+}
+
+type MoreRepeated struct {
+ Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
+ BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"`
+ Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
+ IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"`
+ Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"`
+ Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
+ Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
+func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
+func (*MoreRepeated) ProtoMessage() {}
+func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *MoreRepeated) GetBools() []bool {
+ if m != nil {
+ return m.Bools
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetBoolsPacked() []bool {
+ if m != nil {
+ return m.BoolsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInts() []int32 {
+ if m != nil {
+ return m.Ints
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetIntsPacked() []int32 {
+ if m != nil {
+ return m.IntsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInt64SPacked() []int64 {
+ if m != nil {
+ return m.Int64SPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetStrings() []string {
+ if m != nil {
+ return m.Strings
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetFixeds() []uint32 {
+ if m != nil {
+ return m.Fixeds
+ }
+ return nil
+}
+
+type GroupOld struct {
+ G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld) Reset() { *m = GroupOld{} }
+func (m *GroupOld) String() string { return proto.CompactTextString(m) }
+func (*GroupOld) ProtoMessage() {}
+func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *GroupOld) GetG() *GroupOld_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupOld_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
+func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
+func (*GroupOld_G) ProtoMessage() {}
+func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} }
+
+func (m *GroupOld_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type GroupNew struct {
+ G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew) Reset() { *m = GroupNew{} }
+func (m *GroupNew) String() string { return proto.CompactTextString(m) }
+func (*GroupNew) ProtoMessage() {}
+func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *GroupNew) GetG() *GroupNew_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+type GroupNew_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
+func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
+func (*GroupNew_G) ProtoMessage() {}
+func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} }
+
+func (m *GroupNew_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *GroupNew_G) GetY() int32 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type FloatingPoint struct {
+ F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
+ Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
+func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
+func (*FloatingPoint) ProtoMessage() {}
+func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (m *FloatingPoint) GetF() float64 {
+ if m != nil && m.F != nil {
+ return *m.F
+ }
+ return 0
+}
+
+func (m *FloatingPoint) GetExact() bool {
+ if m != nil && m.Exact != nil {
+ return *m.Exact
+ }
+ return false
+}
+
+type MessageWithMap struct {
+ NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *MessageWithMap) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetStrToStr() map[string]string {
+ if m != nil {
+ return m.StrToStr
+ }
+ return nil
+}
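
Each map field above is encoded as repeated key/value entry messages, per the protobuf_key/protobuf_val halves of the struct tags, so maps round-trip like any other field. A small sketch beside this package:

func mapRoundTrip() (*MessageWithMap, error) {
	in := &MessageWithMap{
		NameMapping: map[int32]string{1: "Rob", 7: "Andrew"},
		ByteMapping: map[bool][]byte{true: []byte("yes")},
	}
	buf, err := proto.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := new(MessageWithMap)
	if err := proto.Unmarshal(buf, out); err != nil {
		return nil, err
	}
	return out, nil // proto.Equal(in, out) should report true
}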
+
+type Oneof struct {
+ // Types that are valid to be assigned to Union:
+ // *Oneof_F_Bool
+ // *Oneof_F_Int32
+ // *Oneof_F_Int64
+ // *Oneof_F_Fixed32
+ // *Oneof_F_Fixed64
+ // *Oneof_F_Uint32
+ // *Oneof_F_Uint64
+ // *Oneof_F_Float
+ // *Oneof_F_Double
+ // *Oneof_F_String
+ // *Oneof_F_Bytes
+ // *Oneof_F_Sint32
+ // *Oneof_F_Sint64
+ // *Oneof_F_Enum
+ // *Oneof_F_Message
+ // *Oneof_FGroup
+ // *Oneof_F_Largest_Tag
+ Union isOneof_Union `protobuf_oneof:"union"`
+ // Types that are valid to be assigned to Tormato:
+ // *Oneof_Value
+ Tormato isOneof_Tormato `protobuf_oneof:"tormato"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof) Reset() { *m = Oneof{} }
+func (m *Oneof) String() string { return proto.CompactTextString(m) }
+func (*Oneof) ProtoMessage() {}
+func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+type isOneof_Union interface {
+ isOneof_Union()
+}
+type isOneof_Tormato interface {
+ isOneof_Tormato()
+}
+
+type Oneof_F_Bool struct {
+ F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"`
+}
+type Oneof_F_Int32 struct {
+ F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"`
+}
+type Oneof_F_Int64 struct {
+ F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"`
+}
+type Oneof_F_Fixed32 struct {
+ F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"`
+}
+type Oneof_F_Fixed64 struct {
+ F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"`
+}
+type Oneof_F_Uint32 struct {
+ F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"`
+}
+type Oneof_F_Uint64 struct {
+ F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"`
+}
+type Oneof_F_Float struct {
+ F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"`
+}
+type Oneof_F_Double struct {
+ F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"`
+}
+type Oneof_F_String struct {
+ F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"`
+}
+type Oneof_F_Bytes struct {
+ F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"`
+}
+type Oneof_F_Sint32 struct {
+ F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"`
+}
+type Oneof_F_Sint64 struct {
+ F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"`
+}
+type Oneof_F_Enum struct {
+ F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"`
+}
+type Oneof_F_Message struct {
+ F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"`
+}
+type Oneof_FGroup struct {
+ FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"`
+}
+type Oneof_F_Largest_Tag struct {
+ F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"`
+}
+type Oneof_Value struct {
+ Value int32 `protobuf:"varint,100,opt,name=value,oneof"`
+}
+
+func (*Oneof_F_Bool) isOneof_Union() {}
+func (*Oneof_F_Int32) isOneof_Union() {}
+func (*Oneof_F_Int64) isOneof_Union() {}
+func (*Oneof_F_Fixed32) isOneof_Union() {}
+func (*Oneof_F_Fixed64) isOneof_Union() {}
+func (*Oneof_F_Uint32) isOneof_Union() {}
+func (*Oneof_F_Uint64) isOneof_Union() {}
+func (*Oneof_F_Float) isOneof_Union() {}
+func (*Oneof_F_Double) isOneof_Union() {}
+func (*Oneof_F_String) isOneof_Union() {}
+func (*Oneof_F_Bytes) isOneof_Union() {}
+func (*Oneof_F_Sint32) isOneof_Union() {}
+func (*Oneof_F_Sint64) isOneof_Union() {}
+func (*Oneof_F_Enum) isOneof_Union() {}
+func (*Oneof_F_Message) isOneof_Union() {}
+func (*Oneof_FGroup) isOneof_Union() {}
+func (*Oneof_F_Largest_Tag) isOneof_Union() {}
+func (*Oneof_Value) isOneof_Tormato() {}
+
+func (m *Oneof) GetUnion() isOneof_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+func (m *Oneof) GetTormato() isOneof_Tormato {
+ if m != nil {
+ return m.Tormato
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Bool() bool {
+ if x, ok := m.GetUnion().(*Oneof_F_Bool); ok {
+ return x.F_Bool
+ }
+ return false
+}
+
+func (m *Oneof) GetF_Int32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int32); ok {
+ return x.F_Int32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Int64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Int64); ok {
+ return x.F_Int64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok {
+ return x.F_Fixed32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Fixed64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok {
+ return x.F_Fixed64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint32() uint32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok {
+ return x.F_Uint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Uint64() uint64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok {
+ return x.F_Uint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Float() float32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Float); ok {
+ return x.F_Float
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Double() float64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Double); ok {
+ return x.F_Double
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_String() string {
+ if x, ok := m.GetUnion().(*Oneof_F_String); ok {
+ return x.F_String
+ }
+ return ""
+}
+
+func (m *Oneof) GetF_Bytes() []byte {
+ if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok {
+ return x.F_Bytes
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Sint32() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok {
+ return x.F_Sint32
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Sint64() int64 {
+ if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok {
+ return x.F_Sint64
+ }
+ return 0
+}
+
+func (m *Oneof) GetF_Enum() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Oneof_F_Enum); ok {
+ return x.F_Enum
+ }
+ return MyMessage_RED
+}
+
+func (m *Oneof) GetF_Message() *GoTestField {
+ if x, ok := m.GetUnion().(*Oneof_F_Message); ok {
+ return x.F_Message
+ }
+ return nil
+}
+
+func (m *Oneof) GetFGroup() *Oneof_F_Group {
+ if x, ok := m.GetUnion().(*Oneof_FGroup); ok {
+ return x.FGroup
+ }
+ return nil
+}
+
+func (m *Oneof) GetF_Largest_Tag() int32 {
+ if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok {
+ return x.F_Largest_Tag
+ }
+ return 0
+}
+
+func (m *Oneof) GetValue() int32 {
+ if x, ok := m.GetTormato().(*Oneof_Value); ok {
+ return x.Value
+ }
+ return 0
+}
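
From user code, selecting a oneof arm means assigning one of the single-field wrapper structs above to the interface-typed Union (or Tormato) field; the typed getters then perform the checked unwrap. A short sketch:

func oneofUsage() (string, int32) {
	m := &Oneof{Union: &Oneof_F_String{F_String: "hello"}}
	s := m.GetF_String()                 // "hello": the union currently holds the string arm
	m.Union = &Oneof_F_Int32{F_Int32: 7} // assigning another wrapper replaces the arm
	i := m.GetF_Int32()                  // 7; GetF_String now reports "" again
	return s, i
}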
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{
+ (*Oneof_F_Bool)(nil),
+ (*Oneof_F_Int32)(nil),
+ (*Oneof_F_Int64)(nil),
+ (*Oneof_F_Fixed32)(nil),
+ (*Oneof_F_Fixed64)(nil),
+ (*Oneof_F_Uint32)(nil),
+ (*Oneof_F_Uint64)(nil),
+ (*Oneof_F_Float)(nil),
+ (*Oneof_F_Double)(nil),
+ (*Oneof_F_String)(nil),
+ (*Oneof_F_Bytes)(nil),
+ (*Oneof_F_Sint32)(nil),
+ (*Oneof_F_Sint64)(nil),
+ (*Oneof_F_Enum)(nil),
+ (*Oneof_F_Message)(nil),
+ (*Oneof_FGroup)(nil),
+ (*Oneof_F_Largest_Tag)(nil),
+ (*Oneof_Value)(nil),
+ }
+}
+
+func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ t := uint64(0)
+ if x.F_Bool {
+ t = 1
+ }
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Oneof_F_Int32:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ b.EncodeVarint(4<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(x.F_Fixed32))
+ case *Oneof_F_Fixed64:
+ b.EncodeVarint(5<<3 | proto.WireFixed64)
+ b.EncodeFixed64(uint64(x.F_Fixed64))
+ case *Oneof_F_Uint32:
+ b.EncodeVarint(6<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ b.EncodeVarint(7<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ b.EncodeVarint(8<<3 | proto.WireFixed32)
+ b.EncodeFixed32(uint64(math.Float32bits(x.F_Float)))
+ case *Oneof_F_Double:
+ b.EncodeVarint(9<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.F_Double))
+ case *Oneof_F_String:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.F_String)
+ case *Oneof_F_Bytes:
+ b.EncodeVarint(11<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ b.EncodeVarint(12<<3 | proto.WireVarint)
+ b.EncodeZigzag32(uint64(x.F_Sint32))
+ case *Oneof_F_Sint64:
+ b.EncodeVarint(13<<3 | proto.WireVarint)
+ b.EncodeZigzag64(uint64(x.F_Sint64))
+ case *Oneof_F_Enum:
+ b.EncodeVarint(14<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ b.EncodeVarint(15<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.F_Message); err != nil {
+ return err
+ }
+ case *Oneof_FGroup:
+ b.EncodeVarint(16<<3 | proto.WireStartGroup)
+ if err := b.Marshal(x.FGroup); err != nil {
+ return err
+ }
+ b.EncodeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ b.EncodeVarint(536870911<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Union has unexpected type %T", x)
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ b.EncodeVarint(100<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Value))
+ case nil:
+ default:
+ return fmt.Errorf("Oneof.Tormato has unexpected type %T", x)
+ }
+ return nil
+}
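
Every EncodeVarint(tag<<3 | wireType) call in the marshaler above writes a standard wire-format key: the wire type occupies the three low bits beneath the field number. Decomposing one by hand (hypothetical check):

const fStringKey = 10<<3 | proto.WireBytes // 82: field 10 (F_String), wire type 2
// fStringKey>>3 == 10 recovers the field number; fStringKey&7 == 2 the wire type.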
+
+func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Oneof)
+ switch tag {
+ case 1: // union.F_Bool
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Bool{x != 0}
+ return true, err
+ case 2: // union.F_Int32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int32{int32(x)}
+ return true, err
+ case 3: // union.F_Int64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Int64{int64(x)}
+ return true, err
+ case 4: // union.F_Fixed32
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Fixed32{uint32(x)}
+ return true, err
+ case 5: // union.F_Fixed64
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Fixed64{x}
+ return true, err
+ case 6: // union.F_Uint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint32{uint32(x)}
+ return true, err
+ case 7: // union.F_Uint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Uint64{x}
+ return true, err
+ case 8: // union.F_Float
+ if wire != proto.WireFixed32 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed32()
+ m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))}
+ return true, err
+ case 9: // union.F_Double
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Oneof_F_Double{math.Float64frombits(x)}
+ return true, err
+ case 10: // union.F_String
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Oneof_F_String{x}
+ return true, err
+ case 11: // union.F_Bytes
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Oneof_F_Bytes{x}
+ return true, err
+ case 12: // union.F_Sint32
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag32()
+ m.Union = &Oneof_F_Sint32{int32(x)}
+ return true, err
+ case 13: // union.F_Sint64
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeZigzag64()
+ m.Union = &Oneof_F_Sint64{int64(x)}
+ return true, err
+ case 14: // union.F_Enum
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Enum{MyMessage_Color(x)}
+ return true, err
+ case 15: // union.F_Message
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(GoTestField)
+ err := b.DecodeMessage(msg)
+ m.Union = &Oneof_F_Message{msg}
+ return true, err
+ case 16: // union.f_group
+ if wire != proto.WireStartGroup {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Oneof_F_Group)
+ err := b.DecodeGroup(msg)
+ m.Union = &Oneof_FGroup{msg}
+ return true, err
+ case 536870911: // union.F_Largest_Tag
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Oneof_F_Largest_Tag{int32(x)}
+ return true, err
+ case 100: // tormato.value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Tormato = &Oneof_Value{int32(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
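+ // _Oneof_OneofSizer returns the encoded size of whichever union variants
+ // are set: the varint key (field number<<3 | wire type) plus the payload.
+ // Fixed32 values cost 4 bytes, fixed64 values 8, length-delimited fields
+ // add a length varint, and groups are bracketed by start/end group keys.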
+func _Oneof_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Oneof)
+ // union
+ switch x := m.Union.(type) {
+ case *Oneof_F_Bool:
+ n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += 1
+ case *Oneof_F_Int32:
+ n += proto.SizeVarint(2<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int32))
+ case *Oneof_F_Int64:
+ n += proto.SizeVarint(3<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Int64))
+ case *Oneof_F_Fixed32:
+ n += proto.SizeVarint(4<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Fixed64:
+ n += proto.SizeVarint(5<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_Uint32:
+ n += proto.SizeVarint(6<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint32))
+ case *Oneof_F_Uint64:
+ n += proto.SizeVarint(7<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Uint64))
+ case *Oneof_F_Float:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed32)
+ n += 4
+ case *Oneof_F_Double:
+ n += proto.SizeVarint(9<<3 | proto.WireFixed64)
+ n += 8
+ case *Oneof_F_String:
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_String)))
+ n += len(x.F_String)
+ case *Oneof_F_Bytes:
+ n += proto.SizeVarint(11<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.F_Bytes)))
+ n += len(x.F_Bytes)
+ case *Oneof_F_Sint32:
+ n += proto.SizeVarint(12<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31))))
+ case *Oneof_F_Sint64:
+ n += proto.SizeVarint(13<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63))))
+ case *Oneof_F_Enum:
+ n += proto.SizeVarint(14<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Enum))
+ case *Oneof_F_Message:
+ s := proto.Size(x.F_Message)
+ n += proto.SizeVarint(15<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Oneof_FGroup:
+ n += proto.SizeVarint(16<<3 | proto.WireStartGroup)
+ n += proto.Size(x.FGroup)
+ n += proto.SizeVarint(16<<3 | proto.WireEndGroup)
+ case *Oneof_F_Largest_Tag:
+ n += proto.SizeVarint(536870911<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.F_Largest_Tag))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ // tormato
+ switch x := m.Tormato.(type) {
+ case *Oneof_Value:
+ n += proto.SizeVarint(100<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Value))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
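+ // Oneof_F_Group is the message type of the group field (tag 16) inside
+ // Oneof's union; its single field X uses tag 17 within the group.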
+type Oneof_F_Group struct {
+ X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} }
+func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) }
+func (*Oneof_F_Group) ProtoMessage() {}
+func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} }
+
+func (m *Oneof_F_Group) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+type Communique struct {
+ MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
+ // This is a oneof, called "union".
+ //
+ // Types that are valid to be assigned to Union:
+ // *Communique_Number
+ // *Communique_Name
+ // *Communique_Data
+ // *Communique_TempC
+ // *Communique_Col
+ // *Communique_Msg
+ Union isCommunique_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Communique) Reset() { *m = Communique{} }
+func (m *Communique) String() string { return proto.CompactTextString(m) }
+func (*Communique) ProtoMessage() {}
+func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+type isCommunique_Union interface {
+ isCommunique_Union()
+}
+
+type Communique_Number struct {
+ Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
+}
+type Communique_Name struct {
+ Name string `protobuf:"bytes,6,opt,name=name,oneof"`
+}
+type Communique_Data struct {
+ Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
+}
+type Communique_TempC struct {
+ TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
+}
+type Communique_Col struct {
+ Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"`
+}
+type Communique_Msg struct {
+ Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"`
+}
+
+func (*Communique_Number) isCommunique_Union() {}
+func (*Communique_Name) isCommunique_Union() {}
+func (*Communique_Data) isCommunique_Union() {}
+func (*Communique_TempC) isCommunique_Union() {}
+func (*Communique_Col) isCommunique_Union() {}
+func (*Communique_Msg) isCommunique_Union() {}
+
+func (m *Communique) GetUnion() isCommunique_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+}
+
+func (m *Communique) GetMakeMeCry() bool {
+ if m != nil && m.MakeMeCry != nil {
+ return *m.MakeMeCry
+ }
+ return false
+}
+
+func (m *Communique) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Communique_Number); ok {
+ return x.Number
+ }
+ return 0
+}
+
+func (m *Communique) GetName() string {
+ if x, ok := m.GetUnion().(*Communique_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Communique) GetData() []byte {
+ if x, ok := m.GetUnion().(*Communique_Data); ok {
+ return x.Data
+ }
+ return nil
+}
+
+func (m *Communique) GetTempC() float64 {
+ if x, ok := m.GetUnion().(*Communique_TempC); ok {
+ return x.TempC
+ }
+ return 0
+}
+
+func (m *Communique) GetCol() MyMessage_Color {
+ if x, ok := m.GetUnion().(*Communique_Col); ok {
+ return x.Col
+ }
+ return MyMessage_RED
+}
+
+func (m *Communique) GetMsg() *Strings {
+ if x, ok := m.GetUnion().(*Communique_Msg); ok {
+ return x.Msg
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
+ (*Communique_Number)(nil),
+ (*Communique_Name)(nil),
+ (*Communique_Data)(nil),
+ (*Communique_TempC)(nil),
+ (*Communique_Col)(nil),
+ (*Communique_Msg)(nil),
+ }
+}
+
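+ // _Communique_OneofMarshaler writes whichever Communique.Union variant is
+ // set: the field key first, then the value in that field's wire format.
+ // A nil union is valid and encodes nothing.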
+func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Number))
+ case *Communique_Name:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case *Communique_Data:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.Data)
+ case *Communique_TempC:
+ b.EncodeVarint(8<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.TempC))
+ case *Communique_Col:
+ b.EncodeVarint(9<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Msg); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Communique.Union has unexpected type %T", x)
+ }
+ return nil
+}
+
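+ // _Communique_OneofUnmarshaler mirrors the marshaler: it checks that the
+ // wire type matches the tag, decodes the value, and stores the matching
+ // wrapper type in m.Union.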
+func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Communique)
+ switch tag {
+ case 5: // union.number
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Number{int32(x)}
+ return true, err
+ case 6: // union.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Union = &Communique_Name{x}
+ return true, err
+ case 7: // union.data
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Union = &Communique_Data{x}
+ return true, err
+ case 8: // union.temp_c
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Union = &Communique_TempC{math.Float64frombits(x)}
+ return true, err
+ case 9: // union.col
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Union = &Communique_Col{MyMessage_Color(x)}
+ return true, err
+ case 10: // union.msg
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Strings)
+ err := b.DecodeMessage(msg)
+ m.Union = &Communique_Msg{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
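+ // _Communique_OneofSizer computes the encoded size of the set union
+ // variant: key varint plus payload, with a length prefix for strings,
+ // bytes and embedded messages.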
+func _Communique_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Communique)
+ // union
+ switch x := m.Union.(type) {
+ case *Communique_Number:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Number))
+ case *Communique_Name:
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case *Communique_Data:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Data)))
+ n += len(x.Data)
+ case *Communique_TempC:
+ n += proto.SizeVarint(8<<3 | proto.WireFixed64)
+ n += 8
+ case *Communique_Col:
+ n += proto.SizeVarint(9<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Col))
+ case *Communique_Msg:
+ s := proto.Size(x.Msg)
+ n += proto.SizeVarint(10<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
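+ // Extension descriptors. Each ExtensionDesc binds an extension field
+ // number and wire tag to the extended message type and the Go type of the
+ // extension value; all of them are registered in init below.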
+var E_Greeting = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 106,
+ Name: "testdata.greeting",
+ Tag: "bytes,106,rep,name=greeting",
+ Filename: "test.proto",
+}
+
+var E_Complex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: (*ComplexExtension)(nil),
+ Field: 200,
+ Name: "testdata.complex",
+ Tag: "bytes,200,opt,name=complex",
+ Filename: "test.proto",
+}
+
+var E_RComplex = &proto.ExtensionDesc{
+ ExtendedType: (*OtherMessage)(nil),
+ ExtensionType: ([]*ComplexExtension)(nil),
+ Field: 201,
+ Name: "testdata.r_complex",
+ Tag: "bytes,201,rep,name=r_complex,json=rComplex",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 101,
+ Name: "testdata.no_default_double",
+ Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 102,
+ Name: "testdata.no_default_float",
+ Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 103,
+ Name: "testdata.no_default_int32",
+ Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 104,
+ Name: "testdata.no_default_int64",
+ Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 105,
+ Name: "testdata.no_default_uint32",
+ Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 106,
+ Name: "testdata.no_default_uint64",
+ Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 107,
+ Name: "testdata.no_default_sint32",
+ Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 108,
+ Name: "testdata.no_default_sint64",
+ Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 109,
+ Name: "testdata.no_default_fixed32",
+ Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 110,
+ Name: "testdata.no_default_fixed64",
+ Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 111,
+ Name: "testdata.no_default_sfixed32",
+ Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 112,
+ Name: "testdata.no_default_sfixed64",
+ Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 113,
+ Name: "testdata.no_default_bool",
+ Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 114,
+ Name: "testdata.no_default_string",
+ Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 115,
+ Name: "testdata.no_default_bytes",
+ Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes",
+ Filename: "test.proto",
+}
+
+var E_NoDefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 116,
+ Name: "testdata.no_default_enum",
+ Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum",
+ Filename: "test.proto",
+}
+
+var E_DefaultDouble = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float64)(nil),
+ Field: 201,
+ Name: "testdata.default_double",
+ Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415",
+ Filename: "test.proto",
+}
+
+var E_DefaultFloat = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*float32)(nil),
+ Field: 202,
+ Name: "testdata.default_float",
+ Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14",
+ Filename: "test.proto",
+}
+
+var E_DefaultInt32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 203,
+ Name: "testdata.default_int32",
+ Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42",
+ Filename: "test.proto",
+}
+
+var E_DefaultInt64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 204,
+ Name: "testdata.default_int64",
+ Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43",
+ Filename: "test.proto",
+}
+
+var E_DefaultUint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 205,
+ Name: "testdata.default_uint32",
+ Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44",
+ Filename: "test.proto",
+}
+
+var E_DefaultUint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 206,
+ Name: "testdata.default_uint64",
+ Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45",
+ Filename: "test.proto",
+}
+
+var E_DefaultSint32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 207,
+ Name: "testdata.default_sint32",
+ Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46",
+ Filename: "test.proto",
+}
+
+var E_DefaultSint64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 208,
+ Name: "testdata.default_sint64",
+ Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47",
+ Filename: "test.proto",
+}
+
+var E_DefaultFixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint32)(nil),
+ Field: 209,
+ Name: "testdata.default_fixed32",
+ Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48",
+ Filename: "test.proto",
+}
+
+var E_DefaultFixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*uint64)(nil),
+ Field: 210,
+ Name: "testdata.default_fixed64",
+ Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49",
+ Filename: "test.proto",
+}
+
+var E_DefaultSfixed32 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 211,
+ Name: "testdata.default_sfixed32",
+ Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50",
+ Filename: "test.proto",
+}
+
+var E_DefaultSfixed64 = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*int64)(nil),
+ Field: 212,
+ Name: "testdata.default_sfixed64",
+ Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51",
+ Filename: "test.proto",
+}
+
+var E_DefaultBool = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 213,
+ Name: "testdata.default_bool",
+ Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1",
+ Filename: "test.proto",
+}
+
+var E_DefaultString = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 214,
+ Name: "testdata.default_string",
+ Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string",
+ Filename: "test.proto",
+}
+
+var E_DefaultBytes = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: ([]byte)(nil),
+ Field: 215,
+ Name: "testdata.default_bytes",
+ Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes",
+ Filename: "test.proto",
+}
+
+var E_DefaultEnum = &proto.ExtensionDesc{
+ ExtendedType: (*DefaultsMessage)(nil),
+ ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
+ Field: 216,
+ Name: "testdata.default_enum",
+ Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1",
+ Filename: "test.proto",
+}
+
+var E_X201 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 201,
+ Name: "testdata.x201",
+ Tag: "bytes,201,opt,name=x201",
+ Filename: "test.proto",
+}
+
+var E_X202 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 202,
+ Name: "testdata.x202",
+ Tag: "bytes,202,opt,name=x202",
+ Filename: "test.proto",
+}
+
+var E_X203 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 203,
+ Name: "testdata.x203",
+ Tag: "bytes,203,opt,name=x203",
+ Filename: "test.proto",
+}
+
+var E_X204 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 204,
+ Name: "testdata.x204",
+ Tag: "bytes,204,opt,name=x204",
+ Filename: "test.proto",
+}
+
+var E_X205 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 205,
+ Name: "testdata.x205",
+ Tag: "bytes,205,opt,name=x205",
+ Filename: "test.proto",
+}
+
+var E_X206 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 206,
+ Name: "testdata.x206",
+ Tag: "bytes,206,opt,name=x206",
+ Filename: "test.proto",
+}
+
+var E_X207 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 207,
+ Name: "testdata.x207",
+ Tag: "bytes,207,opt,name=x207",
+ Filename: "test.proto",
+}
+
+var E_X208 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 208,
+ Name: "testdata.x208",
+ Tag: "bytes,208,opt,name=x208",
+ Filename: "test.proto",
+}
+
+var E_X209 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 209,
+ Name: "testdata.x209",
+ Tag: "bytes,209,opt,name=x209",
+ Filename: "test.proto",
+}
+
+var E_X210 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 210,
+ Name: "testdata.x210",
+ Tag: "bytes,210,opt,name=x210",
+ Filename: "test.proto",
+}
+
+var E_X211 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 211,
+ Name: "testdata.x211",
+ Tag: "bytes,211,opt,name=x211",
+ Filename: "test.proto",
+}
+
+var E_X212 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 212,
+ Name: "testdata.x212",
+ Tag: "bytes,212,opt,name=x212",
+ Filename: "test.proto",
+}
+
+var E_X213 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 213,
+ Name: "testdata.x213",
+ Tag: "bytes,213,opt,name=x213",
+ Filename: "test.proto",
+}
+
+var E_X214 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 214,
+ Name: "testdata.x214",
+ Tag: "bytes,214,opt,name=x214",
+ Filename: "test.proto",
+}
+
+var E_X215 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 215,
+ Name: "testdata.x215",
+ Tag: "bytes,215,opt,name=x215",
+ Filename: "test.proto",
+}
+
+var E_X216 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 216,
+ Name: "testdata.x216",
+ Tag: "bytes,216,opt,name=x216",
+ Filename: "test.proto",
+}
+
+var E_X217 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 217,
+ Name: "testdata.x217",
+ Tag: "bytes,217,opt,name=x217",
+ Filename: "test.proto",
+}
+
+var E_X218 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 218,
+ Name: "testdata.x218",
+ Tag: "bytes,218,opt,name=x218",
+ Filename: "test.proto",
+}
+
+var E_X219 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 219,
+ Name: "testdata.x219",
+ Tag: "bytes,219,opt,name=x219",
+ Filename: "test.proto",
+}
+
+var E_X220 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 220,
+ Name: "testdata.x220",
+ Tag: "bytes,220,opt,name=x220",
+ Filename: "test.proto",
+}
+
+var E_X221 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 221,
+ Name: "testdata.x221",
+ Tag: "bytes,221,opt,name=x221",
+ Filename: "test.proto",
+}
+
+var E_X222 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 222,
+ Name: "testdata.x222",
+ Tag: "bytes,222,opt,name=x222",
+ Filename: "test.proto",
+}
+
+var E_X223 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 223,
+ Name: "testdata.x223",
+ Tag: "bytes,223,opt,name=x223",
+ Filename: "test.proto",
+}
+
+var E_X224 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 224,
+ Name: "testdata.x224",
+ Tag: "bytes,224,opt,name=x224",
+ Filename: "test.proto",
+}
+
+var E_X225 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 225,
+ Name: "testdata.x225",
+ Tag: "bytes,225,opt,name=x225",
+ Filename: "test.proto",
+}
+
+var E_X226 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 226,
+ Name: "testdata.x226",
+ Tag: "bytes,226,opt,name=x226",
+ Filename: "test.proto",
+}
+
+var E_X227 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 227,
+ Name: "testdata.x227",
+ Tag: "bytes,227,opt,name=x227",
+ Filename: "test.proto",
+}
+
+var E_X228 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 228,
+ Name: "testdata.x228",
+ Tag: "bytes,228,opt,name=x228",
+ Filename: "test.proto",
+}
+
+var E_X229 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 229,
+ Name: "testdata.x229",
+ Tag: "bytes,229,opt,name=x229",
+ Filename: "test.proto",
+}
+
+var E_X230 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 230,
+ Name: "testdata.x230",
+ Tag: "bytes,230,opt,name=x230",
+ Filename: "test.proto",
+}
+
+var E_X231 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 231,
+ Name: "testdata.x231",
+ Tag: "bytes,231,opt,name=x231",
+ Filename: "test.proto",
+}
+
+var E_X232 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 232,
+ Name: "testdata.x232",
+ Tag: "bytes,232,opt,name=x232",
+ Filename: "test.proto",
+}
+
+var E_X233 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 233,
+ Name: "testdata.x233",
+ Tag: "bytes,233,opt,name=x233",
+ Filename: "test.proto",
+}
+
+var E_X234 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 234,
+ Name: "testdata.x234",
+ Tag: "bytes,234,opt,name=x234",
+ Filename: "test.proto",
+}
+
+var E_X235 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 235,
+ Name: "testdata.x235",
+ Tag: "bytes,235,opt,name=x235",
+ Filename: "test.proto",
+}
+
+var E_X236 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 236,
+ Name: "testdata.x236",
+ Tag: "bytes,236,opt,name=x236",
+ Filename: "test.proto",
+}
+
+var E_X237 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 237,
+ Name: "testdata.x237",
+ Tag: "bytes,237,opt,name=x237",
+ Filename: "test.proto",
+}
+
+var E_X238 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 238,
+ Name: "testdata.x238",
+ Tag: "bytes,238,opt,name=x238",
+ Filename: "test.proto",
+}
+
+var E_X239 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 239,
+ Name: "testdata.x239",
+ Tag: "bytes,239,opt,name=x239",
+ Filename: "test.proto",
+}
+
+var E_X240 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 240,
+ Name: "testdata.x240",
+ Tag: "bytes,240,opt,name=x240",
+ Filename: "test.proto",
+}
+
+var E_X241 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 241,
+ Name: "testdata.x241",
+ Tag: "bytes,241,opt,name=x241",
+ Filename: "test.proto",
+}
+
+var E_X242 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 242,
+ Name: "testdata.x242",
+ Tag: "bytes,242,opt,name=x242",
+ Filename: "test.proto",
+}
+
+var E_X243 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 243,
+ Name: "testdata.x243",
+ Tag: "bytes,243,opt,name=x243",
+ Filename: "test.proto",
+}
+
+var E_X244 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 244,
+ Name: "testdata.x244",
+ Tag: "bytes,244,opt,name=x244",
+ Filename: "test.proto",
+}
+
+var E_X245 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 245,
+ Name: "testdata.x245",
+ Tag: "bytes,245,opt,name=x245",
+ Filename: "test.proto",
+}
+
+var E_X246 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 246,
+ Name: "testdata.x246",
+ Tag: "bytes,246,opt,name=x246",
+ Filename: "test.proto",
+}
+
+var E_X247 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 247,
+ Name: "testdata.x247",
+ Tag: "bytes,247,opt,name=x247",
+ Filename: "test.proto",
+}
+
+var E_X248 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 248,
+ Name: "testdata.x248",
+ Tag: "bytes,248,opt,name=x248",
+ Filename: "test.proto",
+}
+
+var E_X249 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 249,
+ Name: "testdata.x249",
+ Tag: "bytes,249,opt,name=x249",
+ Filename: "test.proto",
+}
+
+var E_X250 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 250,
+ Name: "testdata.x250",
+ Tag: "bytes,250,opt,name=x250",
+ Filename: "test.proto",
+}
+
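+ // init registers every generated message type, enum and extension with
+ // the proto package under its fully-qualified "testdata." name, enabling
+ // reflection-based encoding, text formatting and extension lookup.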
+func init() {
+ proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum")
+ proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField")
+ proto.RegisterType((*GoTest)(nil), "testdata.GoTest")
+ proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup")
+ proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup")
+ proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup")
+ proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField")
+ proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group")
+ proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest")
+ proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup")
+ proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest")
+ proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest")
+ proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag")
+ proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage")
+ proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested")
+ proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage")
+ proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested")
+ proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage")
+ proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage")
+ proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage")
+ proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
+ proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup")
+ proto.RegisterType((*Ext)(nil), "testdata.Ext")
+ proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension")
+ proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage")
+ proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet")
+ proto.RegisterType((*Empty)(nil), "testdata.Empty")
+ proto.RegisterType((*MessageList)(nil), "testdata.MessageList")
+ proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message")
+ proto.RegisterType((*Strings)(nil), "testdata.Strings")
+ proto.RegisterType((*Defaults)(nil), "testdata.Defaults")
+ proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults")
+ proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum")
+ proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated")
+ proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld")
+ proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G")
+ proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew")
+ proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G")
+ proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint")
+ proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap")
+ proto.RegisterType((*Oneof)(nil), "testdata.Oneof")
+ proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group")
+ proto.RegisterType((*Communique)(nil), "testdata.Communique")
+ proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
+ proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
+ proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
+ proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
+ proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
+ proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
+ proto.RegisterExtension(E_Ext_More)
+ proto.RegisterExtension(E_Ext_Text)
+ proto.RegisterExtension(E_Ext_Number)
+ proto.RegisterExtension(E_Greeting)
+ proto.RegisterExtension(E_Complex)
+ proto.RegisterExtension(E_RComplex)
+ proto.RegisterExtension(E_NoDefaultDouble)
+ proto.RegisterExtension(E_NoDefaultFloat)
+ proto.RegisterExtension(E_NoDefaultInt32)
+ proto.RegisterExtension(E_NoDefaultInt64)
+ proto.RegisterExtension(E_NoDefaultUint32)
+ proto.RegisterExtension(E_NoDefaultUint64)
+ proto.RegisterExtension(E_NoDefaultSint32)
+ proto.RegisterExtension(E_NoDefaultSint64)
+ proto.RegisterExtension(E_NoDefaultFixed32)
+ proto.RegisterExtension(E_NoDefaultFixed64)
+ proto.RegisterExtension(E_NoDefaultSfixed32)
+ proto.RegisterExtension(E_NoDefaultSfixed64)
+ proto.RegisterExtension(E_NoDefaultBool)
+ proto.RegisterExtension(E_NoDefaultString)
+ proto.RegisterExtension(E_NoDefaultBytes)
+ proto.RegisterExtension(E_NoDefaultEnum)
+ proto.RegisterExtension(E_DefaultDouble)
+ proto.RegisterExtension(E_DefaultFloat)
+ proto.RegisterExtension(E_DefaultInt32)
+ proto.RegisterExtension(E_DefaultInt64)
+ proto.RegisterExtension(E_DefaultUint32)
+ proto.RegisterExtension(E_DefaultUint64)
+ proto.RegisterExtension(E_DefaultSint32)
+ proto.RegisterExtension(E_DefaultSint64)
+ proto.RegisterExtension(E_DefaultFixed32)
+ proto.RegisterExtension(E_DefaultFixed64)
+ proto.RegisterExtension(E_DefaultSfixed32)
+ proto.RegisterExtension(E_DefaultSfixed64)
+ proto.RegisterExtension(E_DefaultBool)
+ proto.RegisterExtension(E_DefaultString)
+ proto.RegisterExtension(E_DefaultBytes)
+ proto.RegisterExtension(E_DefaultEnum)
+ proto.RegisterExtension(E_X201)
+ proto.RegisterExtension(E_X202)
+ proto.RegisterExtension(E_X203)
+ proto.RegisterExtension(E_X204)
+ proto.RegisterExtension(E_X205)
+ proto.RegisterExtension(E_X206)
+ proto.RegisterExtension(E_X207)
+ proto.RegisterExtension(E_X208)
+ proto.RegisterExtension(E_X209)
+ proto.RegisterExtension(E_X210)
+ proto.RegisterExtension(E_X211)
+ proto.RegisterExtension(E_X212)
+ proto.RegisterExtension(E_X213)
+ proto.RegisterExtension(E_X214)
+ proto.RegisterExtension(E_X215)
+ proto.RegisterExtension(E_X216)
+ proto.RegisterExtension(E_X217)
+ proto.RegisterExtension(E_X218)
+ proto.RegisterExtension(E_X219)
+ proto.RegisterExtension(E_X220)
+ proto.RegisterExtension(E_X221)
+ proto.RegisterExtension(E_X222)
+ proto.RegisterExtension(E_X223)
+ proto.RegisterExtension(E_X224)
+ proto.RegisterExtension(E_X225)
+ proto.RegisterExtension(E_X226)
+ proto.RegisterExtension(E_X227)
+ proto.RegisterExtension(E_X228)
+ proto.RegisterExtension(E_X229)
+ proto.RegisterExtension(E_X230)
+ proto.RegisterExtension(E_X231)
+ proto.RegisterExtension(E_X232)
+ proto.RegisterExtension(E_X233)
+ proto.RegisterExtension(E_X234)
+ proto.RegisterExtension(E_X235)
+ proto.RegisterExtension(E_X236)
+ proto.RegisterExtension(E_X237)
+ proto.RegisterExtension(E_X238)
+ proto.RegisterExtension(E_X239)
+ proto.RegisterExtension(E_X240)
+ proto.RegisterExtension(E_X241)
+ proto.RegisterExtension(E_X242)
+ proto.RegisterExtension(E_X243)
+ proto.RegisterExtension(E_X244)
+ proto.RegisterExtension(E_X245)
+ proto.RegisterExtension(E_X246)
+ proto.RegisterExtension(E_X247)
+ proto.RegisterExtension(E_X248)
+ proto.RegisterExtension(E_X249)
+ proto.RegisterExtension(E_X250)
+}
+
+func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
+
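+ // fileDescriptor0 is the gzip-compressed FileDescriptorProto for
+ // test.proto, made available through the proto.RegisterFile call above.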
+var fileDescriptor0 = []byte{
+ // 4453 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48,
+ 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e,
+ 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a,
+ 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52,
+ 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9,
+ 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf,
+ 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab,
+ 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b,
+ 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed,
+ 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94,
+ 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f,
+ 0x3b, 0xfb, 0xe6, 0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9,
+ 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7,
+ 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1,
+ 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce,
+ 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f,
+ 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19,
+ 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3,
+ 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6,
+ 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6,
+ 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95,
+ 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2,
+ 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9,
+ 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05,
+ 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1,
+ 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a,
+ 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f,
+ 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8,
+ 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c,
+ 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a,
+ 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55,
+ 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61,
+ 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05,
+ 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68,
+ 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0,
+ 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb,
+ 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58,
+ 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7,
+ 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d,
+ 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95,
+ 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9,
+ 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e,
+ 0x53, 0x25, 0x51, 0x36, 0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c,
+ 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13,
+ 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0,
+ 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6,
+ 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3,
+ 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03,
+ 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9,
+ 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99,
+ 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6,
+ 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef,
+ 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16,
+ 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90,
+ 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd,
+ 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a,
+ 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53,
+ 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac,
+ 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a,
+ 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7,
+ 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58,
+ 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d,
+ 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e,
+ 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0,
+ 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56,
+ 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac,
+ 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40,
+ 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2,
+ 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5,
+ 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e,
+ 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8,
+ 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad,
+ 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7,
+ 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89,
+ 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55,
+ 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b,
+ 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae,
+ 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb,
+ 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13,
+ 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff,
+ 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f,
+ 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93,
+ 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad,
+ 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 0x8a, 0x00, 0xa4, 0x77, 0xf7,
+ 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd,
+ 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6,
+ 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd,
+ 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2,
+ 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8,
+ 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45,
+ 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30,
+ 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14,
+ 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4,
+ 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6,
+ 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74,
+ 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b,
+ 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1,
+ 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90,
+ 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36,
+ 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30,
+ 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c,
+ 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a,
+ 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d,
+ 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2,
+ 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f,
+ 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f,
+ 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70,
+ 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4,
+ 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf,
+ 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81,
+ 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73,
+ 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d,
+ 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e,
+ 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1,
+ 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 0xcb, 0x87, 0xb0, 0xe8,
+ 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27,
+ 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1,
+ 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09,
+ 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05,
+ 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 0x60,
+ 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30,
+ 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a,
+ 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7,
+ 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd,
+ 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07,
+ 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf,
+ 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d,
+ 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d,
+ 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d,
+ 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40,
+ 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4,
+ 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8,
+ 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8,
+ 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d,
+ 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c,
+ 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79,
+ 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c,
+ 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f,
+ 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7,
+ 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3,
+ 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde,
+ 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce,
+ 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50,
+ 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52,
+ 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90,
+ 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa,
+ 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2,
+ 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab,
+ 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf,
+ 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a,
+ 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e,
+ 0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e,
+ 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7,
+ 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff,
+ 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62,
+ 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0,
+ 0xb9, 0xf4, 0xf3, 0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4,
+ 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b,
+ 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc,
+ 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e,
+ 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86,
+ 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26,
+ 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90,
+ 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b,
+ 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31,
+ 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2,
+ 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05,
+ 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8,
+ 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10,
+ 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d,
+ 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81,
+ 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12,
+ 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93,
+ 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63,
+ 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48,
+ 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6,
+ 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36,
+ 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b,
+ 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96,
+ 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7,
+ 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80,
+ 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67,
+ 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84,
+ 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96,
+ 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a,
+ 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec,
+ 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a,
+ 0xbc, 0x1c, 0x9b, 0xa6, 0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67,
+ 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d,
+ 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1,
+ 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8,
+ 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9,
+ 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87,
+ 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f,
+ 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21,
+ 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3,
+ 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16,
+ 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77,
+ 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58,
+ 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e,
+ 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87,
+ 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79,
+ 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd,
+ 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5,
+ 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee,
+ 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76,
+ 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9,
+ 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a,
+ 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22,
+ 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a,
+ 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48,
+ 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36,
+ 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22,
+ 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f,
+ 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39,
+ 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e,
+ 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06,
+ 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e,
+ 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23,
+ 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43,
+ 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50,
+ 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75,
+ 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d,
+ 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb,
+ 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80,
+ 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32,
+ 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec,
+ 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83,
+ 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 0x28, 0x98, 0xda, 0xc7, 0x08,
+ 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e,
+ 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56,
+ 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c,
+ 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37,
+ 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd,
+ 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0,
+ 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7,
+ 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98,
+ 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9,
+ 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee,
+ 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d,
+ 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9,
+ 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b,
+ 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe,
+ 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5,
+ 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab,
+ 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3,
+ 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b,
+ 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4,
+ 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44,
+ 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89,
+ 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38,
+ 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5,
+ 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7,
+ 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61,
+ 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7,
+ 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3,
+ 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b,
+ 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b,
+ 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c,
+ 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x7f, 0x1c,
+ 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd,
+ 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a,
+ 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16,
+ 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a,
+ 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 0xd7,
+ 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36,
+ 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7,
+ 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda,
+ 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad,
+ 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a,
+ 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6,
+ 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb,
+ 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63,
+ 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf,
+ 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed,
+ 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a,
+ 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47,
+ 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd,
+ 0x32, 0x57, 0x39, 0x00, 0x00,
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
new file mode 100644
index 00000000..70e3cfcd
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
@@ -0,0 +1,548 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A feature-rich test file for the protocol compiler and libraries.
+
+syntax = "proto2";
+
+package testdata;
+
+enum FOO { FOO1 = 1; };
+
+message GoEnum {
+ required FOO foo = 1;
+}
+
+message GoTestField {
+ required string Label = 1;
+ required string Type = 2;
+}
+
+message GoTest {
+ // An enum, for completeness.
+ enum KIND {
+ VOID = 0;
+
+ // Basic types
+ BOOL = 1;
+ BYTES = 2;
+ FINGERPRINT = 3;
+ FLOAT = 4;
+ INT = 5;
+ STRING = 6;
+ TIME = 7;
+
+ // Groupings
+ TUPLE = 8;
+ ARRAY = 9;
+ MAP = 10;
+
+ // Table types
+ TABLE = 11;
+
+ // Functions
+ FUNCTION = 12; // last tag
+ };
+
+ // Some typical parameters
+ required KIND Kind = 1;
+ optional string Table = 2;
+ optional int32 Param = 3;
+
+ // Required, repeated and optional foreign fields.
+ required GoTestField RequiredField = 4;
+ repeated GoTestField RepeatedField = 5;
+ optional GoTestField OptionalField = 6;
+
+ // Required fields of all basic types
+ required bool F_Bool_required = 10;
+ required int32 F_Int32_required = 11;
+ required int64 F_Int64_required = 12;
+ required fixed32 F_Fixed32_required = 13;
+ required fixed64 F_Fixed64_required = 14;
+ required uint32 F_Uint32_required = 15;
+ required uint64 F_Uint64_required = 16;
+ required float F_Float_required = 17;
+ required double F_Double_required = 18;
+ required string F_String_required = 19;
+ required bytes F_Bytes_required = 101;
+ required sint32 F_Sint32_required = 102;
+ required sint64 F_Sint64_required = 103;
+
+ // Repeated fields of all basic types
+ repeated bool F_Bool_repeated = 20;
+ repeated int32 F_Int32_repeated = 21;
+ repeated int64 F_Int64_repeated = 22;
+ repeated fixed32 F_Fixed32_repeated = 23;
+ repeated fixed64 F_Fixed64_repeated = 24;
+ repeated uint32 F_Uint32_repeated = 25;
+ repeated uint64 F_Uint64_repeated = 26;
+ repeated float F_Float_repeated = 27;
+ repeated double F_Double_repeated = 28;
+ repeated string F_String_repeated = 29;
+ repeated bytes F_Bytes_repeated = 201;
+ repeated sint32 F_Sint32_repeated = 202;
+ repeated sint64 F_Sint64_repeated = 203;
+
+ // Optional fields of all basic types
+ optional bool F_Bool_optional = 30;
+ optional int32 F_Int32_optional = 31;
+ optional int64 F_Int64_optional = 32;
+ optional fixed32 F_Fixed32_optional = 33;
+ optional fixed64 F_Fixed64_optional = 34;
+ optional uint32 F_Uint32_optional = 35;
+ optional uint64 F_Uint64_optional = 36;
+ optional float F_Float_optional = 37;
+ optional double F_Double_optional = 38;
+ optional string F_String_optional = 39;
+ optional bytes F_Bytes_optional = 301;
+ optional sint32 F_Sint32_optional = 302;
+ optional sint64 F_Sint64_optional = 303;
+
+ // Default-valued fields of all basic types
+ optional bool F_Bool_defaulted = 40 [default=true];
+ optional int32 F_Int32_defaulted = 41 [default=32];
+ optional int64 F_Int64_defaulted = 42 [default=64];
+ optional fixed32 F_Fixed32_defaulted = 43 [default=320];
+ optional fixed64 F_Fixed64_defaulted = 44 [default=640];
+ optional uint32 F_Uint32_defaulted = 45 [default=3200];
+ optional uint64 F_Uint64_defaulted = 46 [default=6400];
+ optional float F_Float_defaulted = 47 [default=314159.];
+ optional double F_Double_defaulted = 48 [default=271828.];
+ optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
+ optional sint32 F_Sint32_defaulted = 402 [default = -32];
+ optional sint64 F_Sint64_defaulted = 403 [default = -64];
+
+ // Packed repeated fields (no string or bytes).
+ repeated bool F_Bool_repeated_packed = 50 [packed=true];
+ repeated int32 F_Int32_repeated_packed = 51 [packed=true];
+ repeated int64 F_Int64_repeated_packed = 52 [packed=true];
+ repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
+ repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
+ repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
+ repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
+ repeated float F_Float_repeated_packed = 57 [packed=true];
+ repeated double F_Double_repeated_packed = 58 [packed=true];
+ repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
+ repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
+
+ // Required, repeated, and optional groups.
+ required group RequiredGroup = 70 {
+ required string RequiredField = 71;
+ };
+
+ repeated group RepeatedGroup = 80 {
+ required string RequiredField = 81;
+ };
+
+ optional group OptionalGroup = 90 {
+ required string RequiredField = 91;
+ };
+}
+
+// For testing a group containing a required field.
+message GoTestRequiredGroupField {
+ required group Group = 1 {
+ required int32 Field = 2;
+ };
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+message GoSkipTest {
+ required int32 skip_int32 = 11;
+ required fixed32 skip_fixed32 = 12;
+ required fixed64 skip_fixed64 = 13;
+ required string skip_string = 14;
+ required group SkipGroup = 15 {
+ required int32 group_int32 = 16;
+ required string group_string = 17;
+ }
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+message NonPackedTest {
+ repeated int32 a = 1;
+}
+
+message PackedTest {
+ repeated int32 b = 1 [packed=true];
+}
+
+message MaxTag {
+ // Maximum possible tag number.
+ optional string last_field = 536870911;
+}
+
+message OldMessage {
+ message Nested {
+ optional string name = 1;
+ }
+ optional Nested nested = 1;
+
+ optional int32 num = 2;
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+message NewMessage {
+ message Nested {
+ optional string name = 1;
+ optional string food_group = 2;
+ }
+ optional Nested nested = 1;
+
+ // This is an int32 in OldMessage.
+ optional int64 num = 2;
+}
+
+// Smaller tests for ASCII formatting.
+
+message InnerMessage {
+ required string host = 1;
+ optional int32 port = 2 [default=4000];
+ optional bool connected = 3;
+}
+
+message OtherMessage {
+ optional int64 key = 1;
+ optional bytes value = 2;
+ optional float weight = 3;
+ optional InnerMessage inner = 4;
+
+ extensions 100 to max;
+}
+
+message RequiredInnerMessage {
+ required InnerMessage leo_finally_won_an_oscar = 1;
+}
+
+message MyMessage {
+ required int32 count = 1;
+ optional string name = 2;
+ optional string quote = 3;
+ repeated string pet = 4;
+ optional InnerMessage inner = 5;
+ repeated OtherMessage others = 6;
+ optional RequiredInnerMessage we_must_go_deeper = 13;
+ repeated InnerMessage rep_inner = 12;
+
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ };
+ optional Color bikeshed = 7;
+
+ optional group SomeGroup = 8 {
+ optional int32 group_field = 9;
+ }
+
+ // This field becomes [][]byte in the generated code.
+ repeated bytes rep_bytes = 10;
+
+ optional double bigfloat = 11;
+
+ extensions 100 to max;
+}
+
+message Ext {
+ extend MyMessage {
+ optional Ext more = 103;
+ optional string text = 104;
+ optional int32 number = 105;
+ }
+
+ optional string data = 1;
+}
+
+extend MyMessage {
+ repeated string greeting = 106;
+}
+
+message ComplexExtension {
+ optional int32 first = 1;
+ optional int32 second = 2;
+ repeated int32 third = 3;
+}
+
+extend OtherMessage {
+ optional ComplexExtension complex = 200;
+ repeated ComplexExtension r_complex = 201;
+}
+
+message DefaultsMessage {
+ enum DefaultsEnum {
+ ZERO = 0;
+ ONE = 1;
+ TWO = 2;
+ };
+ extensions 100 to max;
+}
+
+extend DefaultsMessage {
+ optional double no_default_double = 101;
+ optional float no_default_float = 102;
+ optional int32 no_default_int32 = 103;
+ optional int64 no_default_int64 = 104;
+ optional uint32 no_default_uint32 = 105;
+ optional uint64 no_default_uint64 = 106;
+ optional sint32 no_default_sint32 = 107;
+ optional sint64 no_default_sint64 = 108;
+ optional fixed32 no_default_fixed32 = 109;
+ optional fixed64 no_default_fixed64 = 110;
+ optional sfixed32 no_default_sfixed32 = 111;
+ optional sfixed64 no_default_sfixed64 = 112;
+ optional bool no_default_bool = 113;
+ optional string no_default_string = 114;
+ optional bytes no_default_bytes = 115;
+ optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
+
+ optional double default_double = 201 [default = 3.1415];
+ optional float default_float = 202 [default = 3.14];
+ optional int32 default_int32 = 203 [default = 42];
+ optional int64 default_int64 = 204 [default = 43];
+ optional uint32 default_uint32 = 205 [default = 44];
+ optional uint64 default_uint64 = 206 [default = 45];
+ optional sint32 default_sint32 = 207 [default = 46];
+ optional sint64 default_sint64 = 208 [default = 47];
+ optional fixed32 default_fixed32 = 209 [default = 48];
+ optional fixed64 default_fixed64 = 210 [default = 49];
+ optional sfixed32 default_sfixed32 = 211 [default = 50];
+ optional sfixed64 default_sfixed64 = 212 [default = 51];
+ optional bool default_bool = 213 [default = true];
+ optional string default_string = 214 [default = "Hello, string"];
+ optional bytes default_bytes = 215 [default = "Hello, bytes"];
+ optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
+}
+
+message MyMessageSet {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
+
+message Empty {
+}
+
+extend MyMessageSet {
+ optional Empty x201 = 201;
+ optional Empty x202 = 202;
+ optional Empty x203 = 203;
+ optional Empty x204 = 204;
+ optional Empty x205 = 205;
+ optional Empty x206 = 206;
+ optional Empty x207 = 207;
+ optional Empty x208 = 208;
+ optional Empty x209 = 209;
+ optional Empty x210 = 210;
+ optional Empty x211 = 211;
+ optional Empty x212 = 212;
+ optional Empty x213 = 213;
+ optional Empty x214 = 214;
+ optional Empty x215 = 215;
+ optional Empty x216 = 216;
+ optional Empty x217 = 217;
+ optional Empty x218 = 218;
+ optional Empty x219 = 219;
+ optional Empty x220 = 220;
+ optional Empty x221 = 221;
+ optional Empty x222 = 222;
+ optional Empty x223 = 223;
+ optional Empty x224 = 224;
+ optional Empty x225 = 225;
+ optional Empty x226 = 226;
+ optional Empty x227 = 227;
+ optional Empty x228 = 228;
+ optional Empty x229 = 229;
+ optional Empty x230 = 230;
+ optional Empty x231 = 231;
+ optional Empty x232 = 232;
+ optional Empty x233 = 233;
+ optional Empty x234 = 234;
+ optional Empty x235 = 235;
+ optional Empty x236 = 236;
+ optional Empty x237 = 237;
+ optional Empty x238 = 238;
+ optional Empty x239 = 239;
+ optional Empty x240 = 240;
+ optional Empty x241 = 241;
+ optional Empty x242 = 242;
+ optional Empty x243 = 243;
+ optional Empty x244 = 244;
+ optional Empty x245 = 245;
+ optional Empty x246 = 246;
+ optional Empty x247 = 247;
+ optional Empty x248 = 248;
+ optional Empty x249 = 249;
+ optional Empty x250 = 250;
+}
+
+message MessageList {
+ repeated group Message = 1 {
+ required string name = 2;
+ required int32 count = 3;
+ }
+}
+
+message Strings {
+ optional string string_field = 1;
+ optional bytes bytes_field = 2;
+}
+
+message Defaults {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ }
+
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ optional bool F_Bool = 1 [default=true];
+ optional int32 F_Int32 = 2 [default=32];
+ optional int64 F_Int64 = 3 [default=64];
+ optional fixed32 F_Fixed32 = 4 [default=320];
+ optional fixed64 F_Fixed64 = 5 [default=640];
+ optional uint32 F_Uint32 = 6 [default=3200];
+ optional uint64 F_Uint64 = 7 [default=6400];
+ optional float F_Float = 8 [default=314159.];
+ optional double F_Double = 9 [default=271828.];
+ optional string F_String = 10 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes = 11 [default="Bignose"];
+ optional sint32 F_Sint32 = 12 [default=-32];
+ optional sint64 F_Sint64 = 13 [default=-64];
+ optional Color F_Enum = 14 [default=GREEN];
+
+ // More fields with crazy defaults.
+ optional float F_Pinf = 15 [default=inf];
+ optional float F_Ninf = 16 [default=-inf];
+ optional float F_Nan = 17 [default=nan];
+
+ // Sub-message.
+ optional SubDefaults sub = 18;
+
+ // Redundant but explicit defaults.
+ optional string str_zero = 19 [default=""];
+}
+
+message SubDefaults {
+ optional int64 n = 1 [default=7];
+}
+
+message RepeatedEnum {
+ enum Color {
+ RED = 1;
+ }
+ repeated Color color = 1;
+}
+
+message MoreRepeated {
+ repeated bool bools = 1;
+ repeated bool bools_packed = 2 [packed=true];
+ repeated int32 ints = 3;
+ repeated int32 ints_packed = 4 [packed=true];
+ repeated int64 int64s_packed = 7 [packed=true];
+ repeated string strings = 5;
+ repeated fixed32 fixeds = 6;
+}
+
+// GroupOld and GroupNew have the same wire format.
+// GroupNew has a new field inside a group.
+
+message GroupOld {
+ optional group G = 101 {
+ optional int32 x = 2;
+ }
+}
+
+message GroupNew {
+ optional group G = 101 {
+ optional int32 x = 2;
+ optional int32 y = 3;
+ }
+}
+
+message FloatingPoint {
+ required double f = 1;
+ optional bool exact = 2;
+}
+
+message MessageWithMap {
+ map<int32, string> name_mapping = 1;
+ map<sint64, FloatingPoint> msg_mapping = 2;
+ map<bool, bytes> byte_mapping = 3;
+ map<string, string> str_to_str = 4;
+}
+
+message Oneof {
+ oneof union {
+ bool F_Bool = 1;
+ int32 F_Int32 = 2;
+ int64 F_Int64 = 3;
+ fixed32 F_Fixed32 = 4;
+ fixed64 F_Fixed64 = 5;
+ uint32 F_Uint32 = 6;
+ uint64 F_Uint64 = 7;
+ float F_Float = 8;
+ double F_Double = 9;
+ string F_String = 10;
+ bytes F_Bytes = 11;
+ sint32 F_Sint32 = 12;
+ sint64 F_Sint64 = 13;
+ MyMessage.Color F_Enum = 14;
+ GoTestField F_Message = 15;
+ group F_Group = 16 {
+ optional int32 x = 17;
+ }
+ int32 F_Largest_Tag = 536870911;
+ }
+
+ oneof tormato {
+ int32 value = 100;
+ }
+}
+
+message Communique {
+ optional bool make_me_cry = 1;
+
+ // This is a oneof, called "union".
+ oneof union {
+ int32 number = 5;
+ string name = 6;
+ bytes data = 7;
+ double temp_c = 8;
+ MyMessage.Color col = 9;
+ Strings msg = 10;
+ }
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/text.go b/vendor/src/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..965876bf
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_parser.go b/vendor/src/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..61f83c1e
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
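+
+// Sketch, not part of the upstream file, of the expanded Any form handled
+// above (the type URL is hypothetical):
+//
+//	[type.googleapis.com/example.Inner] <
+//	  host: "example.com"
+//	>
+//
+// readStruct marshals the nested message and stores the result in the
+// TypeUrl and Value fields of the enclosing google.protobuf.Any.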
+
+// consumeExtName consumes an extension name or an expanded Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
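+
+// For example (illustrative, not part of the upstream file):
+// `[testdata.Ext.more]` yields "testdata.Ext.more", while a quoted form
+// such as `["type.googleapis.com/example.Msg"]` (a hypothetical URL) is
+// unquoted and returned as a single token.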
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
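+
+// Illustrative note, not part of the upstream file: for repeated fields,
+// readAny accepts both plain repetition and list notation, so
+// `pet: "horsey" pet: "bunny"` and `pet: ["horsey", "bunny"]` decode to
+// the same slice; see the repeated-field cases in text_parser_test.go.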
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
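+
+// Usage sketch, not part of the upstream file; pb.MyMessage stands in for
+// any generated message type:
+//
+//	msg := new(pb.MyMessage)
+//	if err := proto.UnmarshalText(`count: 42 name: "Dave"`, msg); err != nil {
+//		// handle the parse error, including *proto.RequiredNotSetError
+//	}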
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
new file mode 100644
index 00000000..8f7cb4d2
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
@@ -0,0 +1,673 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "math"
+ "reflect"
+ "testing"
+
+ . "github.com/golang/protobuf/proto"
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ . "github.com/golang/protobuf/proto/testdata"
+)
+
+type UnmarshalTextTest struct {
+ in string
+ err string // if "", no error expected
+ out *MyMessage
+}
+
+func buildExtStructTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_More, &Ext{
+ Data: String("Hello, world!"),
+ })
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtDataTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_Text, String("Hello, world!"))
+ SetExtension(msg, E_Ext_Number, Int32(1729))
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtRepStringTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
+ panic(err)
+ }
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+var unMarshalTextTests = []UnmarshalTextTest{
+ // Basic
+ {
+ in: " count:42\n name:\"Dave\" ",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ },
+ },
+
+ // Empty quoted string
+ {
+ in: `count:42 name:""`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(""),
+ },
+ },
+
+ // Quoted string concatenation with double quotes
+ {
+ in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenation with single quotes
+ {
+ in: "count:42 name: 'My name is '\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string concatenations with mixed quotes
+ {
+ in: "count:42 name: 'My name is '\n\"elsewhere\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+ {
+ in: "count:42 name: \"My name is \"\n'elsewhere'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string with escaped apostrophe
+ {
+ in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("HOLIDAY - New Year's Day"),
+ },
+ },
+
+ // Quoted string with single quote
+ {
+ in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`Roger "The Ramster" Ramjet`),
+ },
+ },
+
+ // Quoted string with all the accepted special characters from the C++ test
+ {
+ in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
+ },
+ },
+
+ // Quoted string with quoted backslash
+ {
+ in: `count:42 name: "\\'xyz"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`\'xyz`),
+ },
+ },
+
+ // Quoted string with UTF-8 bytes.
+ {
+ in: "count:42 name: '\303\277\302\201\xAB'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\303\277\302\201\xAB"),
+ },
+ },
+
+ // Bad quoted string
+ {
+ in: `inner: < host: "\0" >` + "\n",
+ err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
+ },
+
+ // Number too large for int64
+ {
+ in: "count: 1 others { key: 123456789012345678901 }",
+ err: "line 1.23: invalid int64: 123456789012345678901",
+ },
+
+ // Number too large for int32
+ {
+ in: "count: 1234567890123",
+ err: "line 1.7: invalid int32: 1234567890123",
+ },
+
+ // Number in hexadecimal
+ {
+ in: "count: 0x2beef",
+ out: &MyMessage{
+ Count: Int32(0x2beef),
+ },
+ },
+
+ // Number in octal
+ {
+ in: "count: 024601",
+ out: &MyMessage{
+ Count: Int32(024601),
+ },
+ },
+
+ // Floating point number with "f" suffix
+ {
+ in: "count: 4 others:< weight: 17.0f >",
+ out: &MyMessage{
+ Count: Int32(4),
+ Others: []*OtherMessage{
+ {
+ Weight: Float32(17),
+ },
+ },
+ },
+ },
+
+ // Floating point positive infinity
+ {
+ in: "count: 4 bigfloat: inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(1)),
+ },
+ },
+
+ // Floating point negative infinity
+ {
+ in: "count: 4 bigfloat: -inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(-1)),
+ },
+ },
+
+ // Number too large for float32
+ {
+ in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
+ err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
+ },
+
+ // Number posing as a quoted string
+ {
+ in: `inner: < host: 12 >` + "\n",
+ err: `line 1.15: invalid string: 12`,
+ },
+
+ // Quoted string posing as int32
+ {
+ in: `count: "12"`,
+ err: `line 1.7: invalid int32: "12"`,
+ },
+
+ // Quoted string posing as a float32
+ {
+ in: `others:< weight: "17.4" >`,
+ err: `line 1.17: invalid float32: "17.4"`,
+ },
+
+ // Enum
+ {
+ in: `count:42 bikeshed: BLUE`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Bikeshed: MyMessage_BLUE.Enum(),
+ },
+ },
+
+ // Repeated field
+ {
+ in: `count:42 pet: "horsey" pet:"bunny"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated field with list notation
+ {
+ in: `count:42 pet: ["horsey", "bunny"]`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated message with/without colon and <>/{}
+ {
+ in: `count:42 others:{} others{} others:<> others:{}`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Others: []*OtherMessage{
+ {},
+ {},
+ {},
+ {},
+ },
+ },
+ },
+
+ // Missing colon for inner message
+ {
+ in: `count:42 inner < host: "cauchy.syd" >`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("cauchy.syd"),
+ },
+ },
+ },
+
+ // Missing colon for string field
+ {
+ in: `name "Dave"`,
+ err: `line 1.5: expected ':', found "\"Dave\""`,
+ },
+
+ // Missing colon for int32 field
+ {
+ in: `count 42`,
+ err: `line 1.6: expected ':', found "42"`,
+ },
+
+ // Missing required field
+ {
+ in: `name: "Pawel"`,
+ err: `proto: required field "testdata.MyMessage.count" not set`,
+ out: &MyMessage{
+ Name: String("Pawel"),
+ },
+ },
+
+ // Missing required field in a required submessage
+ {
+ in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
+ err: `proto: required field "testdata.InnerMessage.host" not set`,
+ out: &MyMessage{
+ Count: Int32(42),
+ WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
+ },
+ },
+
+ // Repeated non-repeated field
+ {
+ in: `name: "Rob" name: "Russ"`,
+ err: `line 1.12: non-repeated field "name" was repeated`,
+ },
+
+ // Group
+ {
+ in: `count: 17 SomeGroup { group_field: 12 }`,
+ out: &MyMessage{
+ Count: Int32(17),
+ Somegroup: &MyMessage_SomeGroup{
+ GroupField: Int32(12),
+ },
+ },
+ },
+
+ // Semicolon between fields
+ {
+ in: `count:3;name:"Calvin"`,
+ out: &MyMessage{
+ Count: Int32(3),
+ Name: String("Calvin"),
+ },
+ },
+ // Comma between fields
+ {
+ in: `count:4,name:"Ezekiel"`,
+ out: &MyMessage{
+ Count: Int32(4),
+ Name: String("Ezekiel"),
+ },
+ },
+
+ // Boolean false
+ {
+ in: `count:42 inner { host: "example.com" connected: false }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean true
+ {
+ in: `count:42 inner { host: "example.com" connected: true }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean 0
+ {
+ in: `count:42 inner { host: "example.com" connected: 0 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean 1
+ {
+ in: `count:42 inner { host: "example.com" connected: 1 }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean f
+ {
+ in: `count:42 inner { host: "example.com" connected: f }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean t
+ {
+ in: `count:42 inner { host: "example.com" connected: t }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+ // Boolean False
+ {
+ in: `count:42 inner { host: "example.com" connected: False }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(false),
+ },
+ },
+ },
+ // Boolean True
+ {
+ in: `count:42 inner { host: "example.com" connected: True }`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("example.com"),
+ Connected: Bool(true),
+ },
+ },
+ },
+
+ // Extension
+ buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
+ buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
+ buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
+ buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
+
+ // Big all-in-one
+ {
+ in: "count:42 # Meaning\n" +
+ `name:"Dave" ` +
+ `quote:"\"I didn't want to go.\"" ` +
+ `pet:"bunny" ` +
+ `pet:"kitty" ` +
+ `pet:"horsey" ` +
+ `inner:<` +
+ ` host:"footrest.syd" ` +
+ ` port:7001 ` +
+ ` connected:true ` +
+ `> ` +
+ `others:<` +
+ ` key:3735928559 ` +
+ ` value:"\x01A\a\f" ` +
+ `> ` +
+ `others:<` +
+ " weight:58.9 # Atomic weight of Co\n" +
+ ` inner:<` +
+ ` host:"lesha.mtv" ` +
+ ` port:8002 ` +
+ ` >` +
+ `>`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ Quote: String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &InnerMessage{
+ Host: String("footrest.syd"),
+ Port: Int32(7001),
+ Connected: Bool(true),
+ },
+ Others: []*OtherMessage{
+ {
+ Key: Int64(3735928559),
+ Value: []byte{0x1, 'A', '\a', '\f'},
+ },
+ {
+ Weight: Float32(58.9),
+ Inner: &InnerMessage{
+ Host: String("lesha.mtv"),
+ Port: Int32(8002),
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestUnmarshalText(t *testing.T) {
+ for i, test := range unMarshalTextTests {
+ pb := new(MyMessage)
+ err := UnmarshalText(test.in, pb)
+ if test.err == "" {
+ // We don't expect failure.
+ if err != nil {
+ t.Errorf("Test %d: Unexpected error: %v", i, err)
+ } else if !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ } else {
+ // We do expect failure.
+ if err == nil {
+ t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
+ } else if err.Error() != test.err {
+ t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
+ i, err.Error(), test.err)
+ } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ }
+ }
+}
+
+func TestUnmarshalTextCustomMessage(t *testing.T) {
+ msg := &textMessage{}
+ if err := UnmarshalText("custom", msg); err != nil {
+ t.Errorf("Unexpected error from custom unmarshal: %v", err)
+ }
+ if UnmarshalText("not custom", msg) == nil {
+ t.Errorf("Didn't get expected error from custom unmarshal")
+ }
+}
+
+// Regression test; this caused a panic.
+func TestRepeatedEnum(t *testing.T) {
+ pb := new(RepeatedEnum)
+ if err := UnmarshalText("color: RED", pb); err != nil {
+ t.Fatal(err)
+ }
+ exp := &RepeatedEnum{
+ Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+ }
+ if !Equal(pb, exp) {
+ t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
+ }
+}
+
+func TestProto3TextParsing(t *testing.T) {
+ m := new(proto3pb.Message)
+ const in = `name: "Wallace" true_scotsman: true`
+ want := &proto3pb.Message{
+ Name: "Wallace",
+ TrueScotsman: true,
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestMapParsing(t *testing.T) {
+ m := new(MessageWithMap)
+ const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
+ `msg_mapping:<key:-4, value:<f: 2.0>,>` + // separating commas are okay
+ `msg_mapping<key:-2 value<f: 4.0>>` + // no colon after "value"
+ `msg_mapping:<value:<f: 5.0>>` + // omitted key
+ `msg_mapping:<key:1>` + // omitted value
+ `byte_mapping:<key:true value:"so be it">` +
+ `byte_mapping:<>` // omitted key and value
+ want := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Beatles",
+ 1234: "Feist",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ -4: {F: Float64(2.0)},
+ -2: {F: Float64(4.0)},
+ 0: {F: Float64(5.0)},
+ 1: nil,
+ },
+ ByteMapping: map[bool][]byte{
+ false: nil,
+ true: []byte("so be it"),
+ },
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestOneofParsing(t *testing.T) {
+ const in = `name:"Shrek"`
+ m := new(Communique)
+ want := &Communique{Union: &Communique_Name{"Shrek"}}
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+
+ const inOverwrite = `name:"Shrek" number:42`
+ m = new(Communique)
+ testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'"
+ if err := UnmarshalText(inOverwrite, m); err == nil {
+ t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr)
+ } else if err.Error() != testErr {
+ t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v",
+ err.Error(), testErr)
+ }
+
+}
+
+var benchInput string
+
+func init() {
+ benchInput = "count: 4\n"
+ for i := 0; i < 1000; i++ {
+ benchInput += "pet: \"fido\"\n"
+ }
+
+ // Check it is valid input.
+ pb := new(MyMessage)
+ err := UnmarshalText(benchInput, pb)
+ if err != nil {
+ panic("Bad benchmark input: " + err.Error())
+ }
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+ pb := new(MyMessage)
+ for i := 0; i < b.N; i++ {
+ UnmarshalText(benchInput, pb)
+ }
+ b.SetBytes(int64(len(benchInput)))
+}
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_test.go b/vendor/src/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 00000000..3eabacac
--- /dev/null
+++ b/vendor/src/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,474 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "github.com/golang/protobuf/proto/proto3_proto"
+ pb "github.com/golang/protobuf/proto/testdata"
+)
+
+// textMessage implements the methods that allow it to marshal and unmarshal
+// itself as text.
+type textMessage struct {
+}
+
+func (*textMessage) MarshalText() ([]byte, error) {
+ return []byte("custom"), nil
+}
+
+func (*textMessage) UnmarshalText(bytes []byte) error {
+ if string(bytes) != "custom" {
+ return errors.New("expected 'custom'")
+ }
+ return nil
+}
+
+func (*textMessage) Reset() {}
+func (*textMessage) String() string { return "" }
+func (*textMessage) ProtoMessage() {}
+
+func newTestMessage() *pb.MyMessage {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Quote: proto.String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("footrest.syd"),
+ Port: proto.Int32(7001),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(0xdeadbeef),
+ Value: []byte{1, 65, 7, 12},
+ },
+ {
+ Weight: proto.Float32(6.022),
+ Inner: &pb.InnerMessage{
+ Host: proto.String("lesha.mtv"),
+ Port: proto.Int32(8002),
+ },
+ },
+ },
+ Bikeshed: pb.MyMessage_BLUE.Enum(),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(8),
+ },
+ // One normally wouldn't do this.
+ // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
+ XXX_unrecognized: []byte{13<<3 | 0, 4},
+ }
+ ext := &pb.Ext{
+ Data: proto.String("Big gobs for big rats"),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
+ panic(err)
+ }
+ greetings := []string{"adg", "easy", "cow"}
+ if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
+ panic(err)
+ }
+
+ // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
+ if err != nil {
+ panic(err)
+ }
+ b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
+ proto.SetRawExtension(msg, 201, b)
+
+ // Extensions can be plain fields, too, so let's test that.
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+ proto.SetRawExtension(msg, 202, b)
+
+ return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+ host: "footrest.syd"
+ port: 7001
+ connected: true
+>
+others: <
+ key: 3735928559
+ value: "\001A\007\014"
+>
+others: <
+ weight: 6.022
+ inner: <
+ host: "lesha.mtv"
+ port: 8002
+ >
+>
+bikeshed: BLUE
+SomeGroup {
+ group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+ data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != text {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+ }
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != "custom" {
+ t.Errorf("Got %q, expected %q", s, "custom")
+ }
+}
+func TestMarshalTextNil(t *testing.T) {
+ want := ""
+ tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+ for i, test := range tests {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, test); err != nil {
+ t.Fatal(err)
+ }
+ if got := buf.String(); got != want {
+ t.Errorf("%d: got %q want %q", i, got, want)
+ }
+ }
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+ // The Color enum only specifies values 0-2.
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+ got := m.String()
+ const want = `bikeshed:3 `
+ if got != want {
+ t.Errorf("\n got %q\nwant %q", got, want)
+ }
+}
+
+func TestTextOneof(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&pb.Communique{}, ``},
+ // scalar field
+ {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
+ // message field
+ {&pb.Communique{Union: &pb.Communique_Msg{
+ &pb.Strings{StringField: proto.String("why hello!")},
+ }}, `msg:<string_field:"why hello!" >`},
+ // bad oneof (should not panic)
+ {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+ buf := new(bytes.Buffer)
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ proto.MarshalText(buf, m)
+ }
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+ w := ioutil.Discard
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ proto.MarshalText(w, m)
+ }
+}
+
+func compact(src string) string {
+ // s/[ \n]+/ /g; s/ $//;
+ dst := make([]byte, len(src))
+ space, comment := false, false
+ j := 0
+ for i := 0; i < len(src); i++ {
+ if strings.HasPrefix(src[i:], "/*") {
+ comment = true
+ i++
+ continue
+ }
+ if comment && strings.HasPrefix(src[i:], "*/") {
+ comment = false
+ i++
+ continue
+ }
+ if comment {
+ continue
+ }
+ c := src[i]
+ if c == ' ' || c == '\n' {
+ space = true
+ continue
+ }
+ if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+ space = false
+ }
+ if c == '{' {
+ space = false
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ space = false
+ }
+ dst[j] = c
+ j++
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ }
+ return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+ s := proto.CompactTextString(newTestMessage())
+ if s != compactText {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+ }
+}
+
+func TestStringEscaping(t *testing.T) {
+ testCases := []struct {
+ in *pb.Strings
+ out string
+ }{
+ {
+ // Test data from C++ test (TextFormatTest.StringEscape).
+ // Single divergence: we don't escape apostrophes.
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+ "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+ },
+ {
+ // Test data from the same C++ test.
+ &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+ "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+ },
+ {
+ // Some UTF-8.
+ &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+ `string_field: "\000\001\377\201"` + "\n",
+ },
+ }
+
+ for i, tc := range testCases {
+ var buf bytes.Buffer
+ if err := proto.MarshalText(&buf, tc.in); err != nil {
+ t.Errorf("proto.MarsalText: %v", err)
+ continue
+ }
+ s := buf.String()
+ if s != tc.out {
+ t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+ continue
+ }
+
+ // Check round-trip.
+ pb := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pb); err != nil {
+ t.Errorf("#%d: UnmarshalText: %v", i, err)
+ continue
+ }
+ if !proto.Equal(pb, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ }
+ }
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+ b bytes.Buffer
+ limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+ var avail = w.limit - w.b.Len()
+ if avail <= 0 {
+ return 0, outOfSpace
+ }
+ if len(p) <= avail {
+ return w.b.Write(p)
+ }
+ n, _ = w.b.Write(p[:avail])
+ return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+ // Try lots of different sizes to exercise more error code-paths.
+ for lim := 0; lim < len(text); lim++ {
+ buf := new(limitedWriter)
+ buf.limit = lim
+ err := proto.MarshalText(buf, newTestMessage())
+ // We expect a certain error, but also some partial results in the buffer.
+ if err != outOfSpace {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+ }
+ s := buf.b.String()
+ x := text[:buf.limit]
+ if s != x {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+ }
+ }
+}
+
+func TestFloats(t *testing.T) {
+ tests := []struct {
+ f float64
+ want string
+ }{
+ {0, "0"},
+ {4.7, "4.7"},
+ {math.Inf(1), "inf"},
+ {math.Inf(-1), "-inf"},
+ {math.NaN(), "nan"},
+ }
+ for _, test := range tests {
+ msg := &pb.FloatingPoint{F: &test.f}
+ got := strings.TrimSpace(msg.String())
+ want := `f:` + test.want
+ if got != want {
+ t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+ }
+ }
+}
+
+func TestRepeatedNilText(t *testing.T) {
+ m := &pb.MessageList{
+ Message: []*pb.MessageList_Message{
+ nil,
+ &pb.MessageList_Message{
+ Name: proto.String("Horse"),
+ },
+ nil,
+ },
+ }
+ want := `Message <nil>
+Message {
+ name: "Horse"
+}
+Message <nil>
+`
+ if s := proto.MarshalTextString(m); s != want {
+ t.Errorf(" got: %s\nwant: %s", s, want)
+ }
+}
+
+func TestProto3Text(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&proto3pb.Message{}, ``},
+ // zero message except for an empty byte slice
+ {&proto3pb.Message{Data: []byte{}}, ``},
+ // trivial case
+ {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+ // empty map
+ {&pb.MessageWithMap{}, ``},
+ // non-empty map; map format is the same as a repeated struct,
+ // and they are sorted by key (numerically for numeric keys).
+ {
+ &pb.MessageWithMap{NameMapping: map[int32]string{
+ -1: "Negatory",
+ 7: "Lucky",
+ 1234: "Feist",
+ 6345789: "Otis",
+ }},
+ `name_mapping:<key:-1 value:"Negatory" > ` +
+ `name_mapping:<key:7 value:"Lucky" > ` +
+ `name_mapping:<key:1234 value:"Feist" > ` +
+ `name_mapping:<key:6345789 value:"Otis" >`,
+ },
+ // map with nil value; not well-defined, but we shouldn't crash
+ {
+ &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
+ `msg_mapping:<key:7 >`,
+ },
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
diff --git a/vendor/src/github.com/matrix-org/util/LICENSE b/vendor/src/github.com/matrix-org/util/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/matrix-org/util/README.md b/vendor/src/github.com/matrix-org/util/README.md
new file mode 100644
index 00000000..319e4b5d
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/README.md
@@ -0,0 +1,7 @@
+# util
+
+[![GoDoc](https://godoc.org/github.com/matrix-org/util?status.svg)](https://godoc.org/github.com/matrix-org/util)
+[![Build Status](https://travis-ci.org/matrix-org/util.svg?branch=master)](https://travis-ci.org/matrix-org/util)
+[![Coverage Status](https://coveralls.io/repos/github/matrix-org/util/badge.svg)](https://coveralls.io/github/matrix-org/util)
+
+A loose collection of Golang functions that we use at matrix.org
diff --git a/vendor/src/github.com/matrix-org/util/error.go b/vendor/src/github.com/matrix-org/util/error.go
new file mode 100644
index 00000000..9d40c57b
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/error.go
@@ -0,0 +1,18 @@
+package util
+
+import "fmt"
+
+// HTTPError is an HTTP error response, which may wrap an underlying native Go error.
+type HTTPError struct {
+ WrappedError error
+ Message string
+ Code int
+}
+
+func (e HTTPError) Error() string {
+ var wrappedErrMsg string
+ if e.WrappedError != nil {
+ wrappedErrMsg = e.WrappedError.Error()
+ }
+ return fmt.Sprintf("%s: %d: %s", e.Message, e.Code, wrappedErrMsg)
+}
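+
+// Illustrative example, not part of the upstream file:
+//
+//	e := HTTPError{Message: "Not here", Code: 404}
+//	_ = e.Error() // "Not here: 404: "
+//
+// The trailing segment is empty when WrappedError is nil.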
diff --git a/vendor/src/github.com/matrix-org/util/hooks/install.sh b/vendor/src/github.com/matrix-org/util/hooks/install.sh
new file mode 100644
index 00000000..f8aa331f
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/hooks/install.sh
@@ -0,0 +1,5 @@
+#! /bin/bash
+
+DOT_GIT="$(dirname $0)/../.git"
+
+ln -s "../../hooks/pre-commit" "$DOT_GIT/hooks/pre-commit"
\ No newline at end of file
diff --git a/vendor/src/github.com/matrix-org/util/hooks/pre-commit b/vendor/src/github.com/matrix-org/util/hooks/pre-commit
new file mode 100644
index 00000000..41df674d
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/hooks/pre-commit
@@ -0,0 +1,9 @@
+#! /bin/bash
+
+set -eu
+
+golint
+go fmt
+go tool vet --all --shadow .
+gocyclo -over 12 .
+go test -timeout 5s -test.v
diff --git a/vendor/src/github.com/matrix-org/util/json.go b/vendor/src/github.com/matrix-org/util/json.go
new file mode 100644
index 00000000..4277a063
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/json.go
@@ -0,0 +1,154 @@
+package util
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "runtime/debug"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+// ContextKeys is a string type used to namespace Context keys per-package.
+type ContextKeys string
+
+// CtxValueLogger is the key to extract the logrus Logger.
+const CtxValueLogger = ContextKeys("logger")
+
+// JSONRequestHandler represents an interface that must be satisfied in order to respond to incoming
+// HTTP requests with JSON. The value returned from OnIncomingRequest will be marshalled into JSON and
+// sent to the client, unless it is a []byte, in which case the bytes are sent to the client unchanged.
+// If an error is returned, a JSON error response will also be returned, unless the error code
+// is a 302 REDIRECT, in which case a redirect is sent based on the Message field.
+type JSONRequestHandler interface {
+ OnIncomingRequest(req *http.Request) (interface{}, *HTTPError)
+}
+
+// JSONError represents a JSON API error response
+type JSONError struct {
+ Message string `json:"message"`
+}
+
+// Protect panicking HTTP requests from taking down the entire process, and log them using
+// the correct logger, returning a 500 with a JSON response rather than abruptly closing the
+// connection. The http.Request MUST have a CtxValueLogger.
+func Protect(handler http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, req *http.Request) {
+ defer func() {
+ if r := recover(); r != nil {
+ logger := req.Context().Value(CtxValueLogger).(*log.Entry)
+ logger.WithFields(log.Fields{
+ "panic": r,
+ }).Errorf(
+ "Request panicked!\n%s", debug.Stack(),
+ )
+ jsonErrorResponse(
+ w, req, &HTTPError{nil, "Internal Server Error", 500},
+ )
+ }
+ }()
+ handler(w, req)
+ }
+}
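+
+// Note, not part of the upstream file: Protect assumes the request context
+// already carries a *log.Entry under CtxValueLogger, as MakeJSONAPI below
+// arranges; recovering a panic on a request without that value would itself
+// panic on the type assertion inside the deferred function.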
+
+// MakeJSONAPI creates an HTTP handler which always responds to incoming requests with JSON responses.
+// Incoming http.Requests will have a logger (with a request ID/method/path logged) attached to the Context.
+// This can be accessed via the const CtxValueLogger. The type of the logger is *log.Entry from github.com/Sirupsen/logrus.
+func MakeJSONAPI(handler JSONRequestHandler) http.HandlerFunc {
+ return Protect(func(w http.ResponseWriter, req *http.Request) {
+ // Set a Logger on the context
+ ctx := context.WithValue(req.Context(), CtxValueLogger, log.WithFields(log.Fields{
+ "req.method": req.Method,
+ "req.path": req.URL.Path,
+ "req.id": RandomString(12),
+ }))
+ req = req.WithContext(ctx)
+
+ logger := req.Context().Value(CtxValueLogger).(*log.Entry)
+ logger.Print("Incoming request")
+
+ res, httpErr := handler.OnIncomingRequest(req)
+
+ // Set common headers returned regardless of the outcome of the request
+ w.Header().Set("Content-Type", "application/json")
+ SetCORSHeaders(w)
+
+ if httpErr != nil {
+ jsonErrorResponse(w, req, httpErr)
+ return
+ }
+
+ // if they've returned bytes as the response, then just return them rather than marshalling as JSON.
+ // This gives handlers an escape hatch if they want to return cached bytes.
+ var resBytes []byte
+ resBytes, ok := res.([]byte)
+ if !ok {
+ r, err := json.Marshal(res)
+ if err != nil {
+ jsonErrorResponse(w, req, &HTTPError{nil, "Failed to serialise response as JSON", 500})
+ return
+ }
+ resBytes = r
+ }
+ logger.Print(fmt.Sprintf("Responding (%d bytes)", len(resBytes)))
+ w.Write(resBytes)
+ })
+}
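+
+// Wiring sketch, not part of the upstream file; myHandler is a hypothetical
+// type satisfying JSONRequestHandler:
+//
+//	http.Handle("/api/foo", util.MakeJSONAPI(&myHandler{}))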
+
+func jsonErrorResponse(w http.ResponseWriter, req *http.Request, httpErr *HTTPError) {
+ logger := req.Context().Value(CtxValueLogger).(*log.Entry)
+ if httpErr.Code == 302 {
+ logger.WithField("err", httpErr.Error()).Print("Redirecting")
+ http.Redirect(w, req, httpErr.Message, 302)
+ return
+ }
+ logger.WithFields(log.Fields{
+ log.ErrorKey: httpErr,
+ }).Print("Responding with error")
+
+ w.WriteHeader(httpErr.Code) // Set response code
+
+ r, err := json.Marshal(&JSONError{
+ Message: httpErr.Message,
+ })
+ if err != nil {
+ // We should never fail to marshal the JSON error response, but in this event just skip
+ // marshalling altogether
+ logger.Warn("Failed to marshal error response")
+ w.Write([]byte(`{}`))
+ return
+ }
+ w.Write(r)
+}
+
+// WithCORSOptions intercepts all OPTIONS requests and responds with CORS headers. The request handler
+// is not invoked when this happens.
+func WithCORSOptions(handler http.HandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, req *http.Request) {
+ if req.Method == "OPTIONS" {
+ SetCORSHeaders(w)
+ return
+ }
+ handler(w, req)
+ }
+}
+
+// SetCORSHeaders sets unrestricted origin Access-Control headers on the response writer
+func SetCORSHeaders(w http.ResponseWriter) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
+ w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
+}
+
+const alphanumerics = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
+// RandomString generates a pseudo-random string of length n.
+func RandomString(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = alphanumerics[rand.Int63()%int64(len(alphanumerics))]
+ }
+ return string(b)
+}
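+
+// The snippet below is an editor's sketch (not part of the upstream revision)
+// showing how these helpers are intended to compose; the handler type and
+// route are hypothetical.
+//
+// type helloHandler struct{}
+//
+// func (*helloHandler) OnIncomingRequest(req *http.Request) (interface{}, *HTTPError) {
+// return map[string]string{"hello": "world"}, nil
+// }
+//
+// func main() {
+// // MakeJSONAPI adds the logger and panic protection; WithCORSOptions
+// // answers pre-flight OPTIONS requests without invoking the handler.
+// http.HandleFunc("/hello", util.WithCORSOptions(util.MakeJSONAPI(&helloHandler{})))
+// http.ListenAndServe(":8080", nil)
+// }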
diff --git a/vendor/src/github.com/matrix-org/util/json_test.go b/vendor/src/github.com/matrix-org/util/json_test.go
new file mode 100644
index 00000000..203fa708
--- /dev/null
+++ b/vendor/src/github.com/matrix-org/util/json_test.go
@@ -0,0 +1,99 @@
+package util
+
+import (
+ "context"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+type MockJSONRequestHandler struct {
+ handler func(req *http.Request) (interface{}, *HTTPError)
+}
+
+func (h *MockJSONRequestHandler) OnIncomingRequest(req *http.Request) (interface{}, *HTTPError) {
+ return h.handler(req)
+}
+
+type MockResponse struct {
+ Foo string `json:"foo"`
+}
+
+func TestMakeJSONAPI(t *testing.T) {
+ log.SetLevel(log.PanicLevel) // suppress logs in test output
+ tests := []struct {
+ Return interface{}
+ Err *HTTPError
+ ExpectCode int
+ ExpectJSON string
+ }{
+ {nil, &HTTPError{nil, "Everything is broken", 500}, 500, `{"message":"Everything is broken"}`}, // Error return values
+ {nil, &HTTPError{nil, "Not here", 404}, 404, `{"message":"Not here"}`}, // With different status codes
+ {&MockResponse{"yep"}, nil, 200, `{"foo":"yep"}`}, // Success return values
+ {[]MockResponse{{"yep"}, {"narp"}}, nil, 200, `[{"foo":"yep"},{"foo":"narp"}]`}, // Top-level array success values
+ {[]byte(`actually bytes`), nil, 200, `actually bytes`}, // raw []byte escape hatch
+ {func(cannotBe, marshalled string) {}, nil, 500, `{"message":"Failed to serialise response as JSON"}`}, // impossible marshal
+ }
+
+ for _, tst := range tests {
+ mock := MockJSONRequestHandler{func(req *http.Request) (interface{}, *HTTPError) {
+ return tst.Return, tst.Err
+ }}
+ mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
+ mockWriter := httptest.NewRecorder()
+ handlerFunc := MakeJSONAPI(&mock)
+ handlerFunc(mockWriter, mockReq)
+ if mockWriter.Code != tst.ExpectCode {
+ t.Errorf("TestMakeJSONAPI wanted HTTP status %d, got %d", tst.ExpectCode, mockWriter.Code)
+ }
+ actualBody := mockWriter.Body.String()
+ if actualBody != tst.ExpectJSON {
+ t.Errorf("TestMakeJSONAPI wanted body '%s', got '%s'", tst.ExpectJSON, actualBody)
+ }
+ }
+}
+
+func TestMakeJSONAPIRedirect(t *testing.T) {
+ log.SetLevel(log.PanicLevel) // suppress logs in test output
+ mock := MockJSONRequestHandler{func(req *http.Request) (interface{}, *HTTPError) {
+ return nil, &HTTPError{nil, "https://matrix.org", 302}
+ }}
+ mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
+ mockWriter := httptest.NewRecorder()
+ handlerFunc := MakeJSONAPI(&mock)
+ handlerFunc(mockWriter, mockReq)
+ if mockWriter.Code != 302 {
+ t.Errorf("TestMakeJSONAPIRedirect wanted HTTP status 302, got %d", mockWriter.Code)
+ }
+ location := mockWriter.Header().Get("Location")
+ if location != "https://matrix.org" {
+ t.Errorf("TestMakeJSONAPIRedirect wanted Location header 'https://matrix.org', got '%s'", location)
+ }
+}
+
+func TestProtect(t *testing.T) {
+ log.SetLevel(log.PanicLevel) // suppress logs in test output
+ mockWriter := httptest.NewRecorder()
+ mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
+ mockReq = mockReq.WithContext(
+ context.WithValue(mockReq.Context(), CtxValueLogger, log.WithField("test", "yep")),
+ )
+ h := Protect(func(w http.ResponseWriter, req *http.Request) {
+ panic("oh noes!")
+ })
+
+ h(mockWriter, mockReq)
+
+ expectCode := 500
+ if mockWriter.Code != expectCode {
+ t.Errorf("TestProtect wanted HTTP status %d, got %d", expectCode, mockWriter.Code)
+ }
+
+ expectBody := `{"message":"Internal Server Error"}`
+ actualBody := mockWriter.Body.String()
+ if actualBody != expectBody {
+ t.Errorf("TestProtect wanted body %s, got %s", expectBody, actualBody)
+ }
+}
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 00000000..81be2143
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
new file mode 100644
index 00000000..a793c885
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go
@@ -0,0 +1,178 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ . "github.com/matttproud/golang_protobuf_extensions/testdata"
+)
+
+func TestWriteDelimited(t *testing.T) {
+ t.Parallel()
+ for _, test := range []struct {
+ msg proto.Message
+ buf []byte
+ n int
+ err error
+ }{
+ {
+ msg: &Empty{},
+ n: 1,
+ buf: []byte{0},
+ },
+ {
+ msg: &GoEnum{Foo: FOO_FOO1.Enum()},
+ n: 3,
+ buf: []byte{2, 8, 1},
+ },
+ {
+ msg: &Strings{
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
+the encoding size of a single byte varint. We are using it to fuzz test the
+correctness of the header decoding mechanisms, which may prove problematic.
+I expect it may. Let's hope you enjoy testing as much as we do.`),
+ },
+ n: 271,
+ buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
+ 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
+ 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
+ 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
+ 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
+ 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
+ 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
+ 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
+ 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
+ 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
+ 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
+ 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
+ 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
+ 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
+ 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
+ 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
+ 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
+ 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
+ },
+ } {
+ var buf bytes.Buffer
+ if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err {
+ t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err)
+ }
+ if out := buf.Bytes(); !bytes.Equal(out, test.buf) {
+ t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf)
+ }
+ }
+}
+
+func TestReadDelimited(t *testing.T) {
+ t.Parallel()
+ for _, test := range []struct {
+ buf []byte
+ msg proto.Message
+ n int
+ err error
+ }{
+ {
+ buf: []byte{0},
+ msg: &Empty{},
+ n: 1,
+ },
+ {
+ n: 3,
+ buf: []byte{2, 8, 1},
+ msg: &GoEnum{Foo: FOO_FOO1.Enum()},
+ },
+ {
+ buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,
+ 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,
+ 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,
+ 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,
+ 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,
+ 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,
+ 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,
+ 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,
+ 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,
+ 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,
+ 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,
+ 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,
+ 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,
+ 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,
+ 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,
+ 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,
+ 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,
+ 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},
+ msg: &Strings{
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
+the encoding size of a single byte varint. We are using it to fuzz test the
+correctness of the header decoding mechanisms, which may prove problematic.
+I expect it may. Let's hope you enjoy testing as much as we do.`),
+ },
+ n: 271,
+ },
+ } {
+ msg := proto.Clone(test.msg)
+ msg.Reset()
+ if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {
+ t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err)
+ }
+ if !proto.Equal(msg, test.msg) {
+ t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg)
+ }
+ }
+}
+
+func TestEndToEndValid(t *testing.T) {
+ t.Parallel()
+ for _, test := range [][]proto.Message{
+ {&Empty{}},
+ {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},
+ {&GoEnum{Foo: FOO_FOO1.Enum()}},
+ {&Strings{
+ StringField: proto.String(`This is my gigantic, unhappy string. It exceeds
+the encoding size of a single byte varint. We are using it to fuzz test the
+correctness of the header decoding mechanisms, which may prove problematic.
+I expect it may. Let's hope you enjoy testing as much as we do.`),
+ }},
+ } {
+ var buf bytes.Buffer
+ var written int
+ for i, msg := range test {
+ n, err := WriteDelimited(&buf, msg)
+ if err != nil {
+ // Assumption: TestReadDelimited and TestWriteDelimited are sufficient
+ // and inputs for this test are explicitly exercised there.
+ t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err)
+ }
+ written += n
+ }
+ var read int
+ for i, msg := range test {
+ out := proto.Clone(msg)
+ out.Reset()
+ n, _ := ReadDelimited(&buf, out)
+ // TODO: decide whether EOF should be checked for here.
+ read += n
+ if !proto.Equal(out, msg) {
+ t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg)
+ }
+ }
+ if read != written {
+ t.Fatalf("%v read = %d; want %d", test, read, written)
+ }
+ }
+}
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 00000000..258c0636
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ var headerBuf [binary.MaxVarintLen32]byte
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
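+
+// An editor's usage sketch (not part of the upstream revision): draining a
+// stream of delimited messages until EOF. The message type pb.Metric and the
+// reader r are hypothetical.
+//
+// for {
+// msg := &pb.Metric{}
+// if _, err := pbutil.ReadDelimited(r, msg); err == io.EOF {
+// break // the previous record ended exactly at the end of the stream
+// } else if err != nil {
+// return err
+// }
+// handle(msg)
+// }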
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go
new file mode 100644
index 00000000..364a7b79
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go
@@ -0,0 +1,99 @@
+// Copyright 2016 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "bytes"
+ "io"
+ "testing"
+ "testing/iotest"
+)
+
+func TestReadDelimitedIllegalVarint(t *testing.T) {
+ t.Parallel()
+ var tests = []struct {
+ in []byte
+ n int
+ err error
+ }{
+ {
+ in: []byte{255, 255, 255, 255, 255},
+ n: 5,
+ err: errInvalidVarint,
+ },
+ {
+ in: []byte{255, 255, 255, 255, 255, 255},
+ n: 5,
+ err: errInvalidVarint,
+ },
+ }
+ for _, test := range tests {
+ n, err := ReadDelimited(bytes.NewReader(test.in), nil)
+ if got, want := n, test.n; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want)
+ }
+ if got, want := err, test.err; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want)
+ }
+ }
+}
+
+func TestReadDelimitedPrematureHeader(t *testing.T) {
+ t.Parallel()
+ var data = []byte{128, 5} // 256 + 256 + 128
+ n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil)
+ if got, want := n, 1; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want)
+ }
+ if got, want := err, io.EOF; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want)
+ }
+}
+
+func TestReadDelimitedPrematureBody(t *testing.T) {
+ t.Parallel()
+ var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
+ n, err := ReadDelimited(bytes.NewReader(data[:]), nil)
+ if got, want := n, 5; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want)
+ }
+ if got, want := err, io.ErrUnexpectedEOF; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want)
+ }
+}
+
+func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) {
+ t.Parallel()
+ var data = []byte{128, 5} // 256 + 256 + 128
+ n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil)
+ if got, want := n, 1; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want)
+ }
+ if got, want := err, io.EOF; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want)
+ }
+}
+
+func TestReadDelimitedPrematureBodyIncremental(t *testing.T) {
+ t.Parallel()
+ var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128
+ n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil)
+ if got, want := n, 5; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want)
+ }
+ if got, want := err, io.ErrUnexpectedEOF; got != want {
+ t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want)
+ }
+}
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 00000000..c318385c
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 00000000..8fb59ad2
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ var buf [binary.MaxVarintLen32]byte
+ encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
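+
+// An editor's usage sketch (not part of the upstream revision): appending a
+// record to a file. For a 3-byte message body the writer receives 4 bytes in
+// total: the single varint length byte (3) followed by the body. The file
+// name and msg are hypothetical.
+//
+// f, err := os.OpenFile("records.pb", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+// if err != nil {
+// return err
+// }
+// defer f.Close()
+// if _, err := pbutil.WriteDelimited(f, msg); err != nil {
+// return err
+// }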
diff --git a/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go
new file mode 100644
index 00000000..f92632b0
--- /dev/null
+++ b/vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go
@@ -0,0 +1,67 @@
+// Copyright 2016 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errMarshal = errors.New("pbutil: can't marshal")
+
+type cantMarshal struct{ proto.Message }
+
+func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal }
+
+var _ proto.Message = cantMarshal{}
+
+func TestWriteDelimitedMarshalErr(t *testing.T) {
+ t.Parallel()
+ var data cantMarshal
+ var buf bytes.Buffer
+ n, err := WriteDelimited(&buf, data)
+ if got, want := n, 0; got != want {
+ t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want)
+ }
+ if got, want := err, errMarshal; got != want {
+ t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want)
+ }
+}
+
+type canMarshal struct{ proto.Message }
+
+func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil }
+
+var errWrite = errors.New("pbutil: can't write")
+
+type cantWrite struct{}
+
+func (cantWrite) Write([]byte) (int, error) { return 0, errWrite }
+
+func TestWriteDelimitedWriteErr(t *testing.T) {
+ t.Parallel()
+ var data canMarshal
+ var buf cantWrite
+ n, err := WriteDelimited(buf, data)
+ if got, want := n, 0; got != want {
+ t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want)
+ }
+ if got, want := err, errWrite; got != want {
+ t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/AUTHORS.md b/vendor/src/github.com/prometheus/client_golang/AUTHORS.md
new file mode 100644
index 00000000..ab658b35
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/AUTHORS.md
@@ -0,0 +1,11 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein
+
+More than [30 individuals][1] have contributed to this repository. Please refer
+to the Git commit log for a complete list.
+
+[1]: https://github.com/prometheus/client_golang/graphs/contributors
diff --git a/vendor/src/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/src/github.com/prometheus/client_golang/CHANGELOG.md
new file mode 100644
index 00000000..330788a4
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/CHANGELOG.md
@@ -0,0 +1,109 @@
+## 0.8.0 / 2016-08-17
+* [CHANGE] Registry is doing more consistency checks. This might break
+ existing setups that used to export inconsistent metrics.
+* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow
+ arbitrary grouping.
+* [CHANGE] Removed `SelfCollector`.
+* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods.
+* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`,
+ `extraction`.
+* [CHANGE] Deprecated a number of functions.
+* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer`
+ interfaces.
+* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package
+ `promhttp`) and enabling the creation of other exposition mechanisms.
+* [FEATURE] `MustRegister` is variadic now, allowing registration of many
+ collectors in one call.
+* [FEATURE] Added HTTP API v1 package.
+* [ENHANCEMENT] Numerous documentation improvements.
+* [ENHANCEMENT] Improved metric sorting.
+* [ENHANCEMENT] Inlined fnv64a hashing for improved performance.
+* [ENHANCEMENT] Several test improvements.
+* [BUGFIX] Handle collisions in MetricVec.
+
+## 0.7.0 / 2015-07-27
+* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix.
+* [BUGFIX] Closed gaps in metric consistency check.
+* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling.
+* [ENHANCEMENT] Document the possibility of creating "empty" metrics in
+ a metric vector.
+* [ENHANCEMENT] Fix and clarify various doc comments and the README.md.
+* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler.
+* [ENHANCEMENT] Change responseWriterDelegator.written to int64.
+
+## 0.6.0 / 2015-06-01
+* [CHANGE] Rename process_goroutines to go_goroutines.
+* [ENHANCEMENT] Validate label names during YAML decoding.
+* [ENHANCEMENT] Add LabelName regular expression.
+* [BUGFIX] Ensure alignment of struct members for 32-bit systems.
+
+## 0.5.0 / 2015-05-06
+* [BUGFIX] Removed a weakness in the fingerprinting aka signature code.
+ This makes fingerprinting slower and more allocation-heavy, but the
+ weakness was too severe to be tolerated.
+* [CHANGE] As a result of the above, Metric.Fingerprint is now returning
+ a different fingerprint. To keep the same fingerprint, the new method
+ Metric.FastFingerprint was introduced, which will be used by the
+ Prometheus server for storage purposes (implying that a collision
+ detection has to be added, too).
+* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on
+ fingerprinting anymore, removing the possibility of an undetected
+ fingerprint collision.
+* [FEATURE] The Go collector in the exposition library includes garbage
+ collection stats.
+* [FEATURE] The exposition library allows creating constant "throw-away"
+ summaries and histograms.
+* [CHANGE] A number of new reserved labels and prefixes.
+
+## 0.4.0 / 2015-04-08
+* [CHANGE] Return NaN when Summaries have no observations yet.
+* [BUGFIX] Properly handle Summary decay upon Write().
+* [BUGFIX] Fix the documentation link to the consumption library.
+* [FEATURE] Allow the metric family injection hook to merge with existing
+ metric families.
+* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs.
+* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions.
+
+## 0.3.2 / 2015-03-11
+* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is
+ only used by the Prometheus server internally.
+* [CLEANUP] Added licenses of vendored code left out by godep.
+
+## 0.3.1 / 2015-03-04
+* [ENHANCEMENT] Switched fingerprinting functions from own free list to
+ sync.Pool.
+* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests).
+
+## 0.3.0 / 2015-03-03
+* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL
+ PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS
+ VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE.
+* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was
+ arguably broken.)
+* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If
+ client_golang is used as a library, the vendoring will stay out of your way.
+* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made
+ the fingerprinting change above necessary.)
+* [FEATURE] Added new fingerprinting functions SignatureForLabels and
+ SignatureWithoutLabels to be used by the Prometheus server. These functions
+ require fewer allocations than the ones currently used by the server.
+
+## 0.2.0 / 2015-02-23
+* [FEATURE] Introduce new Histogram metric type.
+* [CHANGE] Ignore process collector errors for now (better error handling
+ pending).
+* [CHANGE] Use clear error interface for process pidFn.
+* [BUGFIX] Fix Go download links for several archs and OSes.
+* [ENHANCEMENT] Massively improve Gauge and Counter performance.
+* [ENHANCEMENT] Catch illegal label names for summaries in histograms.
+* [ENHANCEMENT] Reduce allocations during fingerprinting.
+* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if
+ both cgo is available and the build is for an OS with procfs.
+* [CLEANUP] Clean up code style issues.
+* [CLEANUP] Mark slow tests as such and exclude them from Travis.
+* [CLEANUP] Update protobuf library package name.
+* [CLEANUP] Updated vendoring of beorn7/perks.
+
+## 0.1.0 / 2015-02-02
+* [CLEANUP] Introduced semantic versioning and changelog. From now on,
+ changes will be reported in this file.
diff --git a/vendor/src/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/src/github.com/prometheus/client_golang/CONTRIBUTING.md
new file mode 100644
index 00000000..5705f0fb
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/src/github.com/prometheus/client_golang/LICENSE b/vendor/src/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/client_golang/NOTICE b/vendor/src/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 00000000..dd878a30
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/src/github.com/prometheus/client_golang/README.md b/vendor/src/github.com/prometheus/client_golang/README.md
new file mode 100644
index 00000000..d0e06bdf
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/README.md
@@ -0,0 +1,46 @@
+# Prometheus Go client library
+
+[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)
+
+This is the [Go](http://golang.org) client library for
+[Prometheus](http://prometheus.io). It has two separate parts, one for
+instrumenting application code, and one for creating clients that talk to the
+Prometheus HTTP API.
+
+## Instrumenting applications
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)
+
+The
+[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)
+contains the instrumentation library. See the
+[best practices section](http://prometheus.io/docs/practices/naming/) of the
+Prometheus documentation to learn more about instrumenting applications.
+
+The
+[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)
+contains simple examples of instrumented code.
+
+## Client for the Prometheus HTTP API
+
+[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)
+
+The
+[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)
+contains the client for the
+[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you
+to write Go applications that query time series data from a Prometheus server.
+
+## Where are `model`, `extraction`, and `text`?
+
+The `model` package has been moved to
+[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).
+
+The `extraction` and `text` packages are now contained in
+[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).
+
+## Contributing and community
+
+See the [contributing guidelines](CONTRIBUTING.md) and the
+[Community section](http://prometheus.io/community/) of the homepage.
diff --git a/vendor/src/github.com/prometheus/client_golang/VERSION b/vendor/src/github.com/prometheus/client_golang/VERSION
new file mode 100644
index 00000000..a3df0a69
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/VERSION
@@ -0,0 +1 @@
+0.8.0
diff --git a/vendor/src/github.com/prometheus/client_golang/api/prometheus/api.go b/vendor/src/github.com/prometheus/client_golang/api/prometheus/api.go
new file mode 100644
index 00000000..cc5cbc36
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/api/prometheus/api.go
@@ -0,0 +1,348 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides bindings to the Prometheus HTTP API:
+// http://prometheus.io/docs/querying/api/
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+const (
+ statusAPIError = 422
+ apiPrefix = "/api/v1"
+
+ epQuery = "/query"
+ epQueryRange = "/query_range"
+ epLabelValues = "/label/:name/values"
+ epSeries = "/series"
+)
+
+// ErrorType models the different API error types.
+type ErrorType string
+
+// Possible values for ErrorType.
+const (
+ ErrBadData ErrorType = "bad_data"
+ ErrTimeout = "timeout"
+ ErrCanceled = "canceled"
+ ErrExec = "execution"
+ ErrBadResponse = "bad_response"
+)
+
+// Error is an error returned by the API.
+type Error struct {
+ Type ErrorType
+ Msg string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("%s: %s", e.Type, e.Msg)
+}
+
+// CancelableTransport is like http.Transport but provides
+// per-request cancelation functionality.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+// DefaultTransport is used if no Transport is set in Config.
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+// Config defines configuration parameters for a new client.
+type Config struct {
+ // The address of the Prometheus server to connect to.
+ Address string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+// Client is the interface for an API client.
+type Client interface {
+ url(ep string, args map[string]string) *url.URL
+ do(context.Context, *http.Request) (*http.Response, []byte, error)
+}
+
+// New returns a new Client.
+//
+// It is safe to use the returned Client from multiple goroutines.
+func New(cfg Config) (Client, error) {
+ u, err := url.Parse(cfg.Address)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = strings.TrimRight(u.Path, "/") + apiPrefix
+
+ return &httpClient{
+ endpoint: u,
+ transport: cfg.transport(),
+ }, nil
+}
+
+type httpClient struct {
+ endpoint *url.URL
+ transport CancelableTransport
+}
+
+func (c *httpClient) url(ep string, args map[string]string) *url.URL {
+ p := path.Join(c.endpoint.Path, ep)
+
+ for arg, val := range args {
+ arg = ":" + arg
+ p = strings.Replace(p, arg, val, -1)
+ }
+
+ u := *c.endpoint
+ u.Path = p
+
+ return &u
+}
+
+func (c *httpClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ resp, err := ctxhttp.Do(ctx, &http.Client{Transport: c.transport}, req)
+
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
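+ // Read the body in a separate goroutine so that a context cancellation
+ // can interrupt it: closing the response body in the ctx.Done branch
+ // below forces the in-flight read to return.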
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = ioutil.ReadAll(resp.Body)
+ close(done)
+ }()
+
+ select {
+ case <-ctx.Done():
+ err = resp.Body.Close()
+ <-done
+ if err == nil {
+ err = ctx.Err()
+ }
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+// apiClient wraps a regular client and processes successful API responses.
+// "Successful" here also includes responses that errored at the API level.
+type apiClient struct {
+ Client
+}
+
+type apiResponse struct {
+ Status string `json:"status"`
+ Data json.RawMessage `json:"data"`
+ ErrorType ErrorType `json:"errorType"`
+ Error string `json:"error"`
+}
+
+func (c apiClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ resp, body, err := c.Client.do(ctx, req)
+ if err != nil {
+ return resp, body, err
+ }
+
+ code := resp.StatusCode
+
+ if code/100 != 2 && code != statusAPIError {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
+ }
+ }
+
+ var result apiResponse
+
+ if err = json.Unmarshal(body, &result); err != nil {
+ return resp, body, &Error{
+ Type: ErrBadResponse,
+ Msg: err.Error(),
+ }
+ }
+
+ if (code == statusAPIError) != (result.Status == "error") {
+ err = &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ }
+ }
+
+ if code == statusAPIError && result.Status == "error" {
+ err = &Error{
+ Type: result.ErrorType,
+ Msg: result.Error,
+ }
+ }
+
+ return resp, []byte(result.Data), err
+}
+
+// Range represents a sliced time range.
+type Range struct {
+ // The boundaries of the time range.
+ Start, End time.Time
+ // The maximum time between two slices within the boundaries.
+ Step time.Duration
+}
+
+// queryResult contains result data for a query.
+type queryResult struct {
+ Type model.ValueType `json:"resultType"`
+ Result interface{} `json:"result"`
+
+ // The decoded value.
+ v model.Value
+}
+
+func (qr *queryResult) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Type model.ValueType `json:"resultType"`
+ Result json.RawMessage `json:"result"`
+ }{}
+
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ switch v.Type {
+ case model.ValScalar:
+ var sv model.Scalar
+ err = json.Unmarshal(v.Result, &sv)
+ qr.v = &sv
+
+ case model.ValVector:
+ var vv model.Vector
+ err = json.Unmarshal(v.Result, &vv)
+ qr.v = vv
+
+ case model.ValMatrix:
+ var mv model.Matrix
+ err = json.Unmarshal(v.Result, &mv)
+ qr.v = mv
+
+ default:
+ err = fmt.Errorf("unexpected value type %q", v.Type)
+ }
+ return err
+}
+
+// QueryAPI provides bindings for Prometheus's query API.
+type QueryAPI interface {
+ // Query performs a query for the given time.
+ Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
+ // QueryRange performs a query for the given range.
+ QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
+}
+
+// NewQueryAPI returns a new QueryAPI for the client.
+//
+// It is safe to use the returned QueryAPI from multiple goroutines.
+func NewQueryAPI(c Client) QueryAPI {
+ return &httpQueryAPI{client: apiClient{c}}
+}
+
+type httpQueryAPI struct {
+ client Client
+}
+
+func (h *httpQueryAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
+ u := h.client.url(epQuery, nil)
+ q := u.Query()
+
+ q.Set("query", query)
+ q.Set("time", ts.Format(time.RFC3339Nano))
+
+ u.RawQuery = q.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+
+ _, body, err := h.client.do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var qres queryResult
+ err = json.Unmarshal(body, &qres)
+
+ return model.Value(qres.v), err
+}
+
+func (h *httpQueryAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
+ u := h.client.url(epQueryRange, nil)
+ q := u.Query()
+
+ var (
+ start = r.Start.Format(time.RFC3339Nano)
+ end = r.End.Format(time.RFC3339Nano)
+ step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
+ )
+
+ q.Set("query", query)
+ q.Set("start", start)
+ q.Set("end", end)
+ q.Set("step", step)
+
+ u.RawQuery = q.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+
+ _, body, err := h.client.do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ var qres queryResult
+ err = json.Unmarshal(body, &qres)
+
+ return model.Value(qres.v), err
+}
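+
+// An editor's usage sketch (not part of the upstream revision): issuing an
+// instant query. The server address and the "up" expression are hypothetical.
+//
+// client, err := prometheus.New(prometheus.Config{Address: "http://localhost:9090"})
+// if err != nil {
+// log.Fatal(err)
+// }
+// qapi := prometheus.NewQueryAPI(client)
+// val, err := qapi.Query(context.Background(), "up", time.Now())
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Println(val) // a model.Value: scalar, vector or matrix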
diff --git a/vendor/src/github.com/prometheus/client_golang/api/prometheus/api_test.go b/vendor/src/github.com/prometheus/client_golang/api/prometheus/api_test.go
new file mode 100644
index 00000000..ca084a04
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/api/prometheus/api_test.go
@@ -0,0 +1,453 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+)
+
+func TestConfig(t *testing.T) {
+ c := Config{}
+ if c.transport() != DefaultTransport {
+ t.Fatalf("expected default transport for nil Transport field")
+ }
+}
+
+func TestClientURL(t *testing.T) {
+ tests := []struct {
+ address string
+ endpoint string
+ args map[string]string
+ expected string
+ }{
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost",
+ endpoint: "/test",
+ expected: "http://localhost/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "test",
+ expected: "http://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090/prefix",
+ endpoint: "/test",
+ expected: "http://localhost:9090/prefix/test",
+ },
+ {
+ address: "https://localhost:9090/",
+ endpoint: "/test/",
+ expected: "https://localhost:9090/test",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:param",
+ args: map[string]string{
+ "param": "content",
+ },
+ expected: "http://localhost:9090/test/content/more/content",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param/more/:foo",
+ args: map[string]string{
+ "param": "content",
+ "foo": "bar",
+ },
+ expected: "http://localhost:9090/test/content/more/bar",
+ },
+ {
+ address: "http://localhost:9090",
+ endpoint: "/test/:param",
+ args: map[string]string{
+ "nonexistant": "content",
+ },
+ expected: "http://localhost:9090/test/:param",
+ },
+ }
+
+ for _, test := range tests {
+ ep, err := url.Parse(test.address)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hclient := &httpClient{
+ endpoint: ep,
+ transport: DefaultTransport,
+ }
+
+ u := hclient.url(test.endpoint, test.args)
+ if u.String() != test.expected {
+ t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ continue
+ }
+
+ // The apiClient must return exactly the same result as the httpClient.
+ aclient := &apiClient{hclient}
+
+ u = aclient.url(test.endpoint, test.args)
+ if u.String() != test.expected {
+ t.Errorf("unexpected result: got %s, want %s", u, test.expected)
+ }
+ }
+}
+
+type testClient struct {
+ *testing.T
+
+ ch chan apiClientTest
+ req *http.Request
+}
+
+type apiClientTest struct {
+ code int
+ response interface{}
+ expected string
+ err *Error
+}
+
+func (c *testClient) url(ep string, args map[string]string) *url.URL {
+ return nil
+}
+
+func (c *testClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+ if ctx == nil {
+ c.Fatalf("context was not passed down")
+ }
+ if req != c.req {
+ c.Fatalf("request was not passed down")
+ }
+
+ test := <-c.ch
+
+ var b []byte
+ var err error
+
+ switch v := test.response.(type) {
+ case string:
+ b = []byte(v)
+ default:
+ b, err = json.Marshal(v)
+ if err != nil {
+ c.Fatal(err)
+ }
+ }
+
+ resp := &http.Response{
+ StatusCode: test.code,
+ }
+
+ return resp, b, nil
+}
+
+func TestAPIClientDo(t *testing.T) {
+ tests := []apiClientTest{
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`null`),
+ ErrorType: ErrBadData,
+ Error: "failed",
+ },
+ err: &Error{
+ Type: ErrBadData,
+ Msg: "failed",
+ },
+ code: statusAPIError,
+ expected: `null`,
+ },
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrTimeout,
+ Msg: "timed out",
+ },
+ code: statusAPIError,
+ expected: `test`,
+ },
+ {
+ response: "bad json",
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "bad response code 400",
+ },
+ code: http.StatusBadRequest,
+ },
+ {
+ response: "bad json",
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "invalid character 'b' looking for beginning of value",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "success",
+ Data: json.RawMessage(`"test"`),
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "success",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: statusAPIError,
+ },
+ {
+ response: &apiResponse{
+ Status: "error",
+ Data: json.RawMessage(`"test"`),
+ ErrorType: ErrTimeout,
+ Error: "timed out",
+ },
+ err: &Error{
+ Type: ErrBadResponse,
+ Msg: "inconsistent body for response code",
+ },
+ code: http.StatusOK,
+ },
+ }
+
+ tc := &testClient{
+ T: t,
+ ch: make(chan apiClientTest, 1),
+ req: &http.Request{},
+ }
+ client := &apiClient{tc}
+
+ for _, test := range tests {
+
+ tc.ch <- test
+
+ _, body, err := client.do(context.Background(), tc.req)
+
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("expected error %q but got none", test.err)
+ continue
+ }
+ if test.err.Error() != err.Error() {
+ t.Errorf("unexpected error: want %q, got %q", test.err, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("unexpeceted error %s", err)
+ continue
+ }
+
+ want, got := test.expected, string(body)
+ if want != got {
+ t.Errorf("unexpected body: want %q, got %q", want, got)
+ }
+ }
+}
+
+type apiTestClient struct {
+ *testing.T
+ curTest apiTest
+}
+
+type apiTest struct {
+ do func() (interface{}, error)
+ inErr error
+ inRes interface{}
+
+ reqPath string
+ reqParam url.Values
+ reqMethod string
+ res interface{}
+ err error
+}
+
+func (c *apiTestClient) url(ep string, args map[string]string) *url.URL {
+ u := &url.URL{
+ Host: "test:9090",
+ Path: apiPrefix + ep,
+ }
+ return u
+}
+
+func (c *apiTestClient) do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
+
+ test := c.curTest
+
+ if req.URL.Path != test.reqPath {
+ c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
+ }
+ if req.Method != test.reqMethod {
+ c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
+ }
+
+ b, err := json.Marshal(test.inRes)
+ if err != nil {
+ c.Fatal(err)
+ }
+
+ resp := &http.Response{}
+ if test.inErr != nil {
+ resp.StatusCode = statusAPIError
+ } else {
+ resp.StatusCode = http.StatusOK
+ }
+
+ return resp, b, test.inErr
+}
+
+func TestAPIs(t *testing.T) {
+
+ testTime := time.Now()
+
+ client := &apiTestClient{T: t}
+
+ queryAPI := &httpQueryAPI{
+ client: client,
+ }
+
+ doQuery := func(q string, ts time.Time) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.Query(context.Background(), q, ts)
+ }
+ }
+
+ doQueryRange := func(q string, rng Range) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return queryAPI.QueryRange(context.Background(), q, rng)
+ }
+ }
+
+ queryTests := []apiTest{
+ {
+ do: doQuery("2", testTime),
+ inRes: &queryResult{
+ Type: model.ValScalar,
+ Result: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
+ },
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ res: &model.Scalar{
+ Value: 2,
+ Timestamp: model.TimeFromUnix(testTime.Unix()),
+ },
+ },
+ {
+ do: doQuery("2", testTime),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "time": []string{testTime.Format(time.RFC3339Nano)},
+ },
+ err: fmt.Errorf("some error"),
+ },
+
+ {
+ do: doQueryRange("2", Range{
+ Start: testTime.Add(-time.Minute),
+ End: testTime,
+ Step: time.Minute,
+ }),
+ inErr: fmt.Errorf("some error"),
+
+ reqMethod: "GET",
+ reqPath: "/api/v1/query_range",
+ reqParam: url.Values{
+ "query": []string{"2"},
+ "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
+ "end": []string{testTime.Format(time.RFC3339Nano)},
+ "step": []string{time.Minute.String()},
+ },
+ err: fmt.Errorf("some error"),
+ },
+ }
+
+ var tests []apiTest
+ tests = append(tests, queryTests...)
+
+ for _, test := range tests {
+ client.curTest = test
+
+ res, err := test.do()
+
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("expected error %q but got none", test.err)
+ continue
+ }
+ if err.Error() != test.err.Error() {
+ t.Errorf("unexpected error: want %s, got %s", test.err, err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ continue
+ }
+
+ if !reflect.DeepEqual(res, test.res) {
+ t.Errorf("unexpected result: want %v, got %v", test.res, res)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/examples/random/main.go b/vendor/src/github.com/prometheus/client_golang/examples/random/main.go
new file mode 100644
index 00000000..eef50d20
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/examples/random/main.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A simple example exposing fictional RPC latencies with different types of
+// random distributions (uniform, normal, and exponential) as Prometheus
+// metrics.
+package main
+
+import (
+ "flag"
+ "log"
+ "math"
+ "math/rand"
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var (
+ addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
+ uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
+ normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
+ normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
+ oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
+)
+
+var (
+ // Create a summary to track fictional interservice RPC latencies for three
+ // distinct services with different latency distributions. These services are
+ // differentiated via a "service" label.
+ rpcDurations = prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "rpc_durations_seconds",
+ Help: "RPC latency distributions.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"service"},
+ )
+ // The same as above, but now as a histogram, and only for the normal
+ // distribution. The buckets are targeted to the parameters of the
+ // normal distribution, with 20 buckets centered on the mean, each
+ // half-sigma wide.
+ rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "rpc_durations_histogram_seconds",
+ Help: "RPC latency distributions.",
+ Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
+ })
+)
+
+func init() {
+ // Register the summary and the histogram with Prometheus's default registry.
+ prometheus.MustRegister(rpcDurations)
+ prometheus.MustRegister(rpcDurationsHistogram)
+}
+
+func main() {
+ flag.Parse()
+
+ start := time.Now()
+
+ oscillationFactor := func() float64 {
+ return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
+ }
+
+ // Periodically record some sample latencies for the three services.
+ go func() {
+ for {
+ v := rand.Float64() * *uniformDomain
+ rpcDurations.WithLabelValues("uniform").Observe(v)
+ time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ go func() {
+ for {
+ v := (rand.NormFloat64() * *normDomain) + *normMean
+ rpcDurations.WithLabelValues("normal").Observe(v)
+ rpcDurationsHistogram.Observe(v)
+ time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ go func() {
+ for {
+ v := rand.ExpFloat64() / 1e6
+ rpcDurations.WithLabelValues("exponential").Observe(v)
+ time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
+ }
+ }()
+
+ // Expose the registered metrics via HTTP.
+ http.Handle("/metrics", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(*addr, nil))
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/examples/simple/main.go b/vendor/src/github.com/prometheus/client_golang/examples/simple/main.go
new file mode 100644
index 00000000..1fc23249
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/examples/simple/main.go
@@ -0,0 +1,31 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A minimal example of how to include Prometheus instrumentation.
+package main
+
+import (
+ "flag"
+ "log"
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
+
+func main() {
+ flag.Parse()
+ http.Handle("/metrics", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(*addr, nil))
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/README.md b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 00000000..44986bff
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go
new file mode 100644
index 00000000..faad39b4
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/benchmark_test.go
@@ -0,0 +1,185 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "sync"
+ "testing"
+)
+
+func BenchmarkCounterWithLabelValues(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Inc()
+ }
+}
+
+func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for j := 0; j < b.N/10; j++ {
+ m.WithLabelValues("eins", "zwei", "drei").Inc()
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkCounterWithMappedLabels(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc()
+ }
+}
+
+func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {
+ m := NewCounterVec(
+ CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ labels := Labels{"two": "zwei", "one": "eins", "three": "drei"}
+ for i := 0; i < b.N; i++ {
+ m.With(labels).Inc()
+ }
+}
+
+func BenchmarkCounterNoLabels(b *testing.B) {
+ m := NewCounter(CounterOpts{
+ Name: "benchmark_counter",
+ Help: "A counter to benchmark it.",
+ })
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Inc()
+ }
+}
+
+func BenchmarkGaugeWithLabelValues(b *testing.B) {
+ m := NewGaugeVec(
+ GaugeOpts{
+ Name: "benchmark_gauge",
+ Help: "A gauge to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Set(3.1415)
+ }
+}
+
+func BenchmarkGaugeNoLabels(b *testing.B) {
+ m := NewGauge(GaugeOpts{
+ Name: "benchmark_gauge",
+ Help: "A gauge to benchmark it.",
+ })
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Set(3.1415)
+ }
+}
+
+func BenchmarkSummaryWithLabelValues(b *testing.B) {
+ m := NewSummaryVec(
+ SummaryOpts{
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
+ }
+}
+
+func BenchmarkSummaryNoLabels(b *testing.B) {
+ m := NewSummary(SummaryOpts{
+ Name: "benchmark_summary",
+ Help: "A summary to benchmark it.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Observe(3.1415)
+ }
+}
+
+func BenchmarkHistogramWithLabelValues(b *testing.B) {
+ m := NewHistogramVec(
+ HistogramOpts{
+ Name: "benchmark_histogram",
+ Help: "A histogram to benchmark it.",
+ },
+ []string{"one", "two", "three"},
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415)
+ }
+}
+
+func BenchmarkHistogramNoLabels(b *testing.B) {
+ m := NewHistogram(HistogramOpts{
+ Name: "benchmark_histogram",
+ Help: "A histogram to benchmark it.",
+ },
+ )
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.Observe(3.1415)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 00000000..623d3d83
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
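+
+// Illustrative sketch of the intended embedding (myMetric is a hypothetical
+// type that implements the Metric interface):
+//
+//	type myMetric struct {
+//		selfCollector
+//		// ... state needed to implement Metric ...
+//	}
+//
+//	func newMyMetric() *myMetric {
+//		m := &myMetric{}
+//		m.init(m) // let the metric collect itself
+//		return m
+//	}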
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 00000000..72d5256a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,164 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
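+
+// Illustrative sketch of NewCounterFunc (opsProcessed is hypothetical; the
+// atomic load keeps the callback concurrency-safe, and the value only grows):
+//
+//	var opsProcessed int64 // incremented elsewhere via atomic.AddInt64
+//
+//	cf := NewCounterFunc(CounterOpts{
+//		Name: "ops_processed_total",
+//		Help: "Total number of operations processed.",
+//	}, func() float64 {
+//		return float64(atomic.LoadInt64(&opsProcessed))
+//	})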
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/counter_test.go
new file mode 100644
index 00000000..67391a23
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/counter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestCounterAdd(t *testing.T) {
+ counter := NewCounter(CounterOpts{
+ Name: "test",
+ Help: "test help",
+ ConstLabels: Labels{"a": "1", "b": "2"},
+ }).(*counter)
+ counter.Inc()
+ if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+ counter.Add(42)
+ if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
+ t.Errorf("Expected %f, got %f.", expected, got)
+ }
+
+ if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
+ t.Errorf("Expected error %q, got %q.", expected, got)
+ }
+
+ m := &dto.Metric{}
+ counter.Write(m)
+
+ if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func decreaseCounter(c *counter) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(error)
+ }
+ }()
+ c.Add(-1)
+ return nil
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000..1835b16f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,200 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // variableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported on
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
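+
+// An illustrative call (all names are made up):
+//
+//	desc := NewDesc(
+//		"http_requests_total",          // fqName
+//		"Total HTTP requests handled.", // help, must not be empty
+//		[]string{"code", "method"},     // variable label names
+//		Labels{"zone": "eu-west"},      // constant labels
+//	)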
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 00000000..618c4dee
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,181 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow
+// exposing the registered metrics via HTTP (package promhttp) or pushing them
+// to a Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Your own
+// implementation of the Collector interface is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
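+//
+// A sketch of serving a custom registry (assuming promhttp.HandlerFor, which
+// exposes an arbitrary Gatherer):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(myCollector) // myCollector stands in for your Collector
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))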
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Function for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
+package prometheus
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
new file mode 100644
index 00000000..260c1b52
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
@@ -0,0 +1,118 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// ClusterManager is an example for a system that might have been built without
+// Prometheus in mind. It models a central manager of jobs running in a
+// cluster. To turn it into something that collects Prometheus metrics, we
+// simply add the two methods required for the Collector interface.
+//
+// An additional challenge is that multiple instances of the ClusterManager are
+// run within the same binary, each in charge of a different zone. We need to
+// make use of ConstLabels to be able to register each ClusterManager instance
+// with Prometheus.
+type ClusterManager struct {
+ Zone string
+ OOMCountDesc *prometheus.Desc
+ RAMUsageDesc *prometheus.Desc
+ // ... many more fields
+}
+
+// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
+// real cluster manager would have to do. Since it may actually be really
+// expensive, it must only be called once per collection. This implementation,
+// obviously, only returns some made-up data.
+func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
+ oomCountByHost map[string]int, ramUsageByHost map[string]float64,
+) {
+ // Just example fake data.
+ oomCountByHost = map[string]int{
+ "foo.example.org": 42,
+ "bar.example.org": 2001,
+ }
+ ramUsageByHost = map[string]float64{
+ "foo.example.org": 6.023e23,
+ "bar.example.org": 3.14,
+ }
+ return
+}
+
+// Describe simply sends the two Descs in the struct to the channel.
+func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
+ ch <- c.OOMCountDesc
+ ch <- c.RAMUsageDesc
+}
+
+// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
+// creates constant metrics for each host on the fly based on the returned data.
+//
+// Note that Collect could be called concurrently, so we depend on
+// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
+func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
+ oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
+ for host, oomCount := range oomCountByHost {
+ ch <- prometheus.MustNewConstMetric(
+ c.OOMCountDesc,
+ prometheus.CounterValue,
+ float64(oomCount),
+ host,
+ )
+ }
+ for host, ramUsage := range ramUsageByHost {
+ ch <- prometheus.MustNewConstMetric(
+ c.RAMUsageDesc,
+ prometheus.GaugeValue,
+ ramUsage,
+ host,
+ )
+ }
+}
+
+// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
+// that the zone is set as a ConstLabel. (It's different in each instance of the
+// ClusterManager, but constant over the lifetime of an instance.) Then there is
+// a variable label "host", since we want to partition the collected metrics by
+// host. Since all Descs created in this way are consistent across instances,
+// with a guaranteed distinction by the "zone" label, we can register different
+// ClusterManager instances with the same registry.
+func NewClusterManager(zone string) *ClusterManager {
+ return &ClusterManager{
+ Zone: zone,
+ OOMCountDesc: prometheus.NewDesc(
+ "clustermanager_oom_crashes_total",
+ "Number of OOM crashes.",
+ []string{"host"},
+ prometheus.Labels{"zone": zone},
+ ),
+ RAMUsageDesc: prometheus.NewDesc(
+ "clustermanager_ram_usage_bytes",
+ "RAM usage as reported to the cluster manager.",
+ []string{"host"},
+ prometheus.Labels{"zone": zone},
+ ),
+ }
+}
+
+func ExampleCollector() {
+ workerDB := NewClusterManager("db")
+ workerCA := NewClusterManager("ca")
+
+ // Since we are dealing with custom Collector implementations, it might
+ // be a good idea to try it out with a pedantic registry.
+ reg := prometheus.NewPedanticRegistry()
+ reg.MustRegister(workerDB)
+ reg.MustRegister(workerCA)
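+
+ // At scrape time, gathering from reg (e.g. via promhttp.HandlerFor) would
+ // invoke Collect on both ClusterManager instances.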
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go
new file mode 100644
index 00000000..76378809
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ // apiRequestDuration tracks the duration separate for each HTTP status
+ // class (1xx, 2xx, ...). This creates a fair amount of time series on
+ // the Prometheus server. Usually, you would track the duration of
+ // serving HTTP requests without partitioning by outcome. Do something
+ // like this only if needed. Also note how only status classes are
+ // tracked, not every single status code. The latter would create an
+ // even larger amount of time series. Request counters partitioned by
+ // status code are usually OK as each counter only creates one time
+ // series. Histograms are way more expensive, so partition with care and
+ // only where you really need separate latency tracking. Partitioning by
+ // status class is only an example. In concrete cases, other partitions
+ // might make more sense.
+ apiRequestDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "api_request_duration_seconds",
+ Help: "Histogram for the request duration of the public API, partitioned by status class.",
+ Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
+ },
+ []string{"status_class"},
+ )
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ status := http.StatusOK
+ // The ObserverFunc gets called by the deferred ObserveDuration and
+ // decides which Histogram's Observe method is called.
+ timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
+ switch {
+ case status >= 500: // Server error.
+ apiRequestDuration.WithLabelValues("5xx").Observe(v)
+ case status >= 400: // Client error.
+ apiRequestDuration.WithLabelValues("4xx").Observe(v)
+ case status >= 300: // Redirection.
+ apiRequestDuration.WithLabelValues("3xx").Observe(v)
+ case status >= 200: // Success.
+ apiRequestDuration.WithLabelValues("2xx").Observe(v)
+ default: // Informational.
+ apiRequestDuration.WithLabelValues("1xx").Observe(v)
+ }
+ }))
+ defer timer.ObserveDuration()
+
+ // Handle the request. Set status accordingly.
+ // ...
+}
+
+func ExampleTimer_complex() {
+ http.HandleFunc("/api", handler)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go
new file mode 100644
index 00000000..dd91066f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ // If a function is called rarely (i.e. not more often than scrapes
+ // happen) or ideally only once (like in a batch job), it can make sense
+ // to use a Gauge for timing the function call. For timing a batch job
+ // and pushing the result to a Pushgateway, see also the comprehensive
+ // example in the push package.
+ funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "example_function_duration_seconds",
+ Help: "Duration of the last call of an example function.",
+ })
+)
+
+func ExampleTimer_gauge() error {
+ // The Set method of the Gauge is used to observe the duration.
+ timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))
+ defer timer.ObserveDuration()
+
+ // Do something. Return errors as encountered. The use of 'defer' above
+ // makes sure the function is still timed properly.
+ return nil
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_test.go
new file mode 100644
index 00000000..bd86bb47
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/example_timer_test.go
@@ -0,0 +1,40 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "example_request_duration_seconds",
+ Help: "Histogram for the runtime of a simple example function.",
+ Buckets: prometheus.LinearBuckets(0.01, 0.01, 10),
+ })
+)
+
+func ExampleTimer() {
+ // timer times this example function. It uses a Histogram, but a Summary
+ // would also work, as both implement Observer. Check out
+ // https://prometheus.io/docs/practices/histograms/ for differences.
+ timer := prometheus.NewTimer(requestDuration)
+ defer timer.ObserveDuration()
+
+ // Do something here that takes time.
+ time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/examples_test.go
new file mode 100644
index 00000000..45f60650
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/examples_test.go
@@ -0,0 +1,754 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "net/http"
+ "runtime"
+ "sort"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func ExampleGauge() {
+ opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "our_company",
+ Subsystem: "blob_storage",
+ Name: "ops_queued",
+ Help: "Number of blob storage operations waiting to be processed.",
+ })
+ prometheus.MustRegister(opsQueued)
+
+ // 10 operations queued by the goroutine managing incoming requests.
+ opsQueued.Add(10)
+ // A worker goroutine has picked up a waiting operation.
+ opsQueued.Dec()
+ // And once more...
+ opsQueued.Dec()
+}
+
+func ExampleGaugeVec() {
+ opsQueued := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "our_company",
+ Subsystem: "blob_storage",
+ Name: "ops_queued",
+ Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.",
+ },
+ []string{
+ // Which user has requested the operation?
+ "user",
+ // Of what type is the operation?
+ "type",
+ },
+ )
+ prometheus.MustRegister(opsQueued)
+
+ // Increase a value using compact (but order-sensitive!) WithLabelValues().
+ opsQueued.WithLabelValues("bob", "put").Add(4)
+ // Increase a value with a map using With. More verbose, but the order
+ // doesn't matter anymore.
+ opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc()
+}
+
+func ExampleGaugeFunc() {
+ if err := prometheus.Register(prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Subsystem: "runtime",
+ Name: "goroutines_count",
+ Help: "Number of goroutines that currently exist.",
+ },
+ func() float64 { return float64(runtime.NumGoroutine()) },
+ )); err == nil {
+ fmt.Println("GaugeFunc 'goroutines_count' registered.")
+ }
+ // Note that the count of goroutines is a gauge (and not a counter) as
+ // it can go up and down.
+
+ // Output:
+ // GaugeFunc 'goroutines_count' registered.
+}
+
+func ExampleCounter() {
+ pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "repository_pushes", // Note: No help string...
+ })
+ err := prometheus.Register(pushCounter) // ... so this will return an error.
+ if err != nil {
+ fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
+ return
+ }
+
+ // Try it once more, this time with a help string.
+ pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "repository_pushes",
+ Help: "Number of pushes to external repository.",
+ })
+ err = prometheus.Register(pushCounter)
+ if err != nil {
+ fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
+ return
+ }
+
+ pushComplete := make(chan struct{})
+ // TODO: Start a goroutine that performs repository pushes and reports
+ // each completion via the channel.
+ for range pushComplete {
+ pushCounter.Inc()
+ }
+ // Output:
+ // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
+}
+
+func ExampleCounterVec() {
+ httpReqs := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "http_requests_total",
+ Help: "How many HTTP requests processed, partitioned by status code and HTTP method.",
+ },
+ []string{"code", "method"},
+ )
+ prometheus.MustRegister(httpReqs)
+
+ httpReqs.WithLabelValues("404", "POST").Add(42)
+
+ // If you have to access the same set of labels very frequently, it
+ // might be good to retrieve the metric only once and keep a handle to
+ // it. But beware of deletion of that metric, see below!
+ m := httpReqs.WithLabelValues("200", "GET")
+ for i := 0; i < 1000000; i++ {
+ m.Inc()
+ }
+ // Delete a metric from the vector. If you have previously kept a handle
+ // to that metric (as above), future updates via that handle will go
+ // unseen (even if you re-create a metric with the same label set
+ // later).
+ httpReqs.DeleteLabelValues("200", "GET")
+ // Same thing with the more verbose Labels syntax.
+ httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"})
+}
+
+func ExampleInstrumentHandler() {
+ // Handle the "/doc" endpoint with the standard http.FileServer handler.
+ // By wrapping the handler with InstrumentHandler, request count,
+ // request and response sizes, and request latency are automatically
+ // exported to Prometheus, partitioned by HTTP status code and method
+ // and by the handler name (here "fileserver").
+ http.Handle("/doc", prometheus.InstrumentHandler(
+ "fileserver", http.FileServer(http.Dir("/usr/share/doc")),
+ ))
+ // The Prometheus handler still has to be registered to handle the
+ // "/metrics" endpoint. The handler returned by prometheus.Handler() is
+ // already instrumented - with "prometheus" as the handler name. In this
+ // example, we want the handler name to be "metrics", so we instrument
+ // the uninstrumented Prometheus handler ourselves.
+ http.Handle("/metrics", prometheus.InstrumentHandler(
+ "metrics", prometheus.UninstrumentedHandler(),
+ ))
+}
+
+func ExampleLabelPairSorter() {
+ labelPairs := []*dto.LabelPair{
+ {Name: proto.String("status"), Value: proto.String("404")},
+ {Name: proto.String("method"), Value: proto.String("get")},
+ }
+
+ sort.Sort(prometheus.LabelPairSorter(labelPairs))
+
+ fmt.Println(labelPairs)
+ // Output:
+ // [name:"method" value:"get" name:"status" value:"404" ]
+}
+
+func ExampleRegister() {
+ // Imagine you have a worker pool and want to count the tasks completed.
+ taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
+ Subsystem: "worker_pool",
+ Name: "completed_tasks_total",
+ Help: "Total number of tasks completed.",
+ })
+ // This will register fine.
+ if err := prometheus.Register(taskCounter); err != nil {
+ fmt.Println(err)
+ } else {
+ fmt.Println("taskCounter registered.")
+ }
+ // Don't forget to tell the HTTP server about the Prometheus handler.
+ // (In a real program, you still need to start the HTTP server...)
+ http.Handle("/metrics", prometheus.Handler())
+
+ // Now you can start workers and give every one of them a pointer to
+ // taskCounter and let it increment it whenever it completes a task.
+ taskCounter.Inc() // This has to happen somewhere in the worker code.
+
+ // But wait, you want to see how individual workers perform. So you need
+ // a vector of counters, with one element for each worker.
+ taskCounterVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Subsystem: "worker_pool",
+ Name: "completed_tasks_total",
+ Help: "Total number of tasks completed.",
+ },
+ []string{"worker_id"},
+ )
+
+ // Registering will fail because we already have a metric of that name.
+ if err := prometheus.Register(taskCounterVec); err != nil {
+ fmt.Println("taskCounterVec not registered:", err)
+ } else {
+ fmt.Println("taskCounterVec registered.")
+ }
+
+ // To fix, first unregister the old taskCounter.
+ if prometheus.Unregister(taskCounter) {
+ fmt.Println("taskCounter unregistered.")
+ }
+
+ // Try registering taskCounterVec again.
+ if err := prometheus.Register(taskCounterVec); err != nil {
+ fmt.Println("taskCounterVec not registered:", err)
+ } else {
+ fmt.Println("taskCounterVec registered.")
+ }
+ // Bummer! Still doesn't work.
+
+ // Prometheus will not allow you to ever export metrics with
+ // inconsistent help strings or label names. After unregistering, the
+ // unregistered metrics will cease to show up in the /metrics HTTP
+ // response, but the registry still remembers that those metrics had
+ // been exported before. For this example, we will now choose a
+ // different name. (In a real program, you would obviously not export
+ // the obsolete metric in the first place.)
+ taskCounterVec = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Subsystem: "worker_pool",
+ Name: "completed_tasks_by_id",
+ Help: "Total number of tasks completed.",
+ },
+ []string{"worker_id"},
+ )
+ if err := prometheus.Register(taskCounterVec); err != nil {
+ fmt.Println("taskCounterVec not registered:", err)
+ } else {
+ fmt.Println("taskCounterVec registered.")
+ }
+ // Finally it worked!
+
+ // The workers have to tell taskCounterVec their id to increment the
+ // right element in the metric vector.
+ taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42.
+
+ // Each worker could also keep a reference to their own counter element
+ // around. Pick the counter at initialization time of the worker.
+ myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code.
+ myCounter.Inc() // Somewhere in the code of that worker.
+
+ // Note that something like WithLabelValues("42", "spurious arg") would
+ // panic (because you have provided too many label values). If you want
+ // to get an error instead, use GetMetricWithLabelValues(...).
+ notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg")
+ if err != nil {
+ fmt.Println("Worker initialization failed:", err)
+ }
+ if notMyCounter == nil {
+ fmt.Println("notMyCounter is nil.")
+ }
+
+ // A different (and somewhat tricky) approach is to use
+ // ConstLabels. ConstLabels are pairs of label names and label values
+ // that never change. You might ask what those labels are good for (and
+ // rightfully so - if they never change, they could as well be part of
+ // the metric name). There are essentially two use-cases: The first is
+ // if labels are constant throughout the lifetime of a binary execution,
+ // but they vary over time or between different instances of a running
+ // binary. The second is what we have here: Each worker creates and
+ // registers its own Counter instance where the only difference is in the
+ // value of the ConstLabels. Those Counters can all be registered
+ // because the different ConstLabel values guarantee that each worker
+ // will increment a different Counter metric.
+ counterOpts := prometheus.CounterOpts{
+ Subsystem: "worker_pool",
+ Name: "completed_tasks",
+ Help: "Total number of tasks completed.",
+ ConstLabels: prometheus.Labels{"worker_id": "42"},
+ }
+ taskCounterForWorker42 := prometheus.NewCounter(counterOpts)
+ if err := prometheus.Register(taskCounterForWorker42); err != nil {
+ fmt.Println("taskCounterVForWorker42 not registered:", err)
+ } else {
+ fmt.Println("taskCounterForWorker42 registered.")
+ }
+ // Obviously, in real code, taskCounterForWorker42 would be a member
+ // variable of a worker struct, and the "42" would be retrieved with a
+ // GetId() method or something. The Counter would be created and
+ // registered in the initialization code of the worker.
+
+ // For the creation of the next Counter, we can recycle
+ // counterOpts. Just change the ConstLabels.
+ counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"}
+ taskCounterForWorker2001 := prometheus.NewCounter(counterOpts)
+ if err := prometheus.Register(taskCounterForWorker2001); err != nil {
+ fmt.Println("taskCounterVForWorker2001 not registered:", err)
+ } else {
+ fmt.Println("taskCounterForWorker2001 registered.")
+ }
+
+ taskCounterForWorker2001.Inc()
+ taskCounterForWorker42.Inc()
+ taskCounterForWorker2001.Inc()
+
+ // Yet another approach would be to turn the workers themselves into
+ // Collectors and register them. See the Collector example for details.
+
+ // Output:
+ // taskCounter registered.
+ // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
+ // taskCounter unregistered.
+ // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string
+ // taskCounterVec registered.
+ // Worker initialization failed: inconsistent label cardinality
+ // notMyCounter is nil.
+ // taskCounterForWorker42 registered.
+ // taskCounterForWorker2001 registered.
+}
+
+func ExampleSummary() {
+ temps := prometheus.NewSummary(prometheus.SummaryOpts{
+ Name: "pond_temperature_celsius",
+ Help: "The temperature of the frog pond.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ })
+
+ // Simulate some observations.
+ for i := 0; i < 1000; i++ {
+ temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+ }
+
+ // Just for demonstration, let's check the state of the summary by
+ // (ab)using its Write method (which is usually only used by Prometheus
+ // internally).
+ metric := &dto.Metric{}
+ temps.Write(metric)
+ fmt.Println(proto.MarshalTextString(metric))
+
+ // Output:
+ // summary: <
+ // sample_count: 1000
+ // sample_sum: 29969.50000000001
+ // quantile: <
+ // quantile: 0.5
+ // value: 31.1
+ // >
+ // quantile: <
+ // quantile: 0.9
+ // value: 41.3
+ // >
+ // quantile: <
+ // quantile: 0.99
+ // value: 41.9
+ // >
+ // >
+}
+
+func ExampleSummaryVec() {
+ temps := prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "pond_temperature_celsius",
+ Help: "The temperature of the frog pond.",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"species"},
+ )
+
+ // Simulate some observations.
+ for i := 0; i < 1000; i++ {
+ temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+ temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
+ }
+
+ // Create a Summary without any observations.
+ temps.WithLabelValues("leiopelma-hochstetteri")
+
+ // Just for demonstration, let's check the state of the summary vector
+ // by registering it with a custom registry and then let it collect the
+ // metrics.
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(temps)
+
+ metricFamilies, err := reg.Gather()
+ if err != nil || len(metricFamilies) != 1 {
+ panic("unexpected behavior of custom test registry")
+ }
+ fmt.Println(proto.MarshalTextString(metricFamilies[0]))
+
+ // Output:
+ // name: "pond_temperature_celsius"
+ // help: "The temperature of the frog pond."
+ // type: SUMMARY
+ // metric: <
+ // label: <
+ // name: "species"
+ // value: "leiopelma-hochstetteri"
+ // >
+ // summary: <
+ // sample_count: 0
+ // sample_sum: 0
+ // quantile: <
+ // quantile: 0.5
+ // value: nan
+ // >
+ // quantile: <
+ // quantile: 0.9
+ // value: nan
+ // >
+ // quantile: <
+ // quantile: 0.99
+ // value: nan
+ // >
+ // >
+ // >
+ // metric: <
+ // label: <
+ // name: "species"
+ // value: "lithobates-catesbeianus"
+ // >
+ // summary: <
+ // sample_count: 1000
+ // sample_sum: 31956.100000000017
+ // quantile: <
+ // quantile: 0.5
+ // value: 32.4
+ // >
+ // quantile: <
+ // quantile: 0.9
+ // value: 41.4
+ // >
+ // quantile: <
+ // quantile: 0.99
+ // value: 41.9
+ // >
+ // >
+ // >
+ // metric: <
+ // label: <
+ // name: "species"
+ // value: "litoria-caerulea"
+ // >
+ // summary: <
+ // sample_count: 1000
+ // sample_sum: 29969.50000000001
+ // quantile: <
+ // quantile: 0.5
+ // value: 31.1
+ // >
+ // quantile: <
+ // quantile: 0.9
+ // value: 41.3
+ // >
+ // quantile: <
+ // quantile: 0.99
+ // value: 41.9
+ // >
+ // >
+ // >
+}
+
+func ExampleNewConstSummary() {
+ desc := prometheus.NewDesc(
+ "http_request_duration_seconds",
+ "A summary of the HTTP request durations.",
+ []string{"code", "method"},
+ prometheus.Labels{"owner": "example"},
+ )
+
+ // Create a constant summary from values we got from a 3rd party telemetry system.
+ s := prometheus.MustNewConstSummary(
+ desc,
+ 4711, 403.34,
+ map[float64]float64{0.5: 42.3, 0.9: 323.3},
+ "200", "get",
+ )
+
+ // Just for demonstration, let's check the state of the summary by
+ // (ab)using its Write method (which is usually only used by Prometheus
+ // internally).
+ metric := &dto.Metric{}
+ s.Write(metric)
+ fmt.Println(proto.MarshalTextString(metric))
+
+ // Output:
+ // label: <
+ // name: "code"
+ // value: "200"
+ // >
+ // label: <
+ // name: "method"
+ // value: "get"
+ // >
+ // label: <
+ // name: "owner"
+ // value: "example"
+ // >
+ // summary: <
+ // sample_count: 4711
+ // sample_sum: 403.34
+ // quantile: <
+ // quantile: 0.5
+ // value: 42.3
+ // >
+ // quantile: <
+ // quantile: 0.9
+ // value: 323.3
+ // >
+ // >
+}
+
+func ExampleHistogram() {
+ temps := prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "pond_temperature_celsius",
+ Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
+ Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide.
+ })
+
+ // Simulate some observations.
+ for i := 0; i < 1000; i++ {
+ temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
+ }
+
+ // Just for demonstration, let's check the state of the histogram by
+ // (ab)using its Write method (which is usually only used by Prometheus
+ // internally).
+ metric := &dto.Metric{}
+ temps.Write(metric)
+ fmt.Println(proto.MarshalTextString(metric))
+
+ // Output:
+ // histogram: <
+ // sample_count: 1000
+ // sample_sum: 29969.50000000001
+ // bucket: <
+ // cumulative_count: 192
+ // upper_bound: 20
+ // >
+ // bucket: <
+ // cumulative_count: 366
+ // upper_bound: 25
+ // >
+ // bucket: <
+ // cumulative_count: 501
+ // upper_bound: 30
+ // >
+ // bucket: <
+ // cumulative_count: 638
+ // upper_bound: 35
+ // >
+ // bucket: <
+ // cumulative_count: 816
+ // upper_bound: 40
+ // >
+ // >
+}
+
+func ExampleNewConstHistogram() {
+ desc := prometheus.NewDesc(
+ "http_request_duration_seconds",
+ "A histogram of the HTTP request durations.",
+ []string{"code", "method"},
+ prometheus.Labels{"owner": "example"},
+ )
+
+ // Create a constant histogram from values we got from a 3rd party telemetry system.
+ h := prometheus.MustNewConstHistogram(
+ desc,
+ 4711, 403.34,
+ map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
+ "200", "get",
+ )
+
+ // Just for demonstration, let's check the state of the histogram by
+ // (ab)using its Write method (which is usually only used by Prometheus
+ // internally).
+ metric := &dto.Metric{}
+ h.Write(metric)
+ fmt.Println(proto.MarshalTextString(metric))
+
+ // Output:
+ // label: <
+ // name: "code"
+ // value: "200"
+ // >
+ // label: <
+ // name: "method"
+ // value: "get"
+ // >
+ // label: <
+ // name: "owner"
+ // value: "example"
+ // >
+ // histogram: <
+ // sample_count: 4711
+ // sample_sum: 403.34
+ // bucket: <
+ // cumulative_count: 121
+ // upper_bound: 25
+ // >
+ // bucket: <
+ // cumulative_count: 2403
+ // upper_bound: 50
+ // >
+ // bucket: <
+ // cumulative_count: 3221
+ // upper_bound: 100
+ // >
+ // bucket: <
+ // cumulative_count: 4233
+ // upper_bound: 200
+ // >
+ // >
+}
+
+func ExampleAlreadyRegisteredError() {
+ reqCounter := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "requests_total",
+ Help: "The total number of requests served.",
+ })
+ if err := prometheus.Register(reqCounter); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ // A counter for that metric has been registered before.
+ // Use the old counter from now on.
+ reqCounter = are.ExistingCollector.(prometheus.Counter)
+ } else {
+ // Something else went wrong!
+ panic(err)
+ }
+ }
+ reqCounter.Inc()
+}
+
+func ExampleGatherers() {
+ reg := prometheus.NewRegistry()
+ temp := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "temperature_kelvin",
+ Help: "Temperature in Kelvin.",
+ },
+ []string{"location"},
+ )
+ reg.MustRegister(temp)
+ temp.WithLabelValues("outside").Set(273.14)
+ temp.WithLabelValues("inside").Set(298.44)
+
+ var parser expfmt.TextParser
+
+ text := `
+# TYPE humidity_percent gauge
+# HELP humidity_percent Humidity in %.
+humidity_percent{location="outside"} 45.4
+humidity_percent{location="inside"} 33.2
+# TYPE temperature_kelvin gauge
+# HELP temperature_kelvin Temperature in Kelvin.
+temperature_kelvin{location="somewhere else"} 4.5
+`
+
+ parseText := func() ([]*dto.MetricFamily, error) {
+ parsed, err := parser.TextToMetricFamilies(strings.NewReader(text))
+ if err != nil {
+ return nil, err
+ }
+ var result []*dto.MetricFamily
+ for _, mf := range parsed {
+ result = append(result, mf)
+ }
+ return result, nil
+ }
+
+ gatherers := prometheus.Gatherers{
+ reg,
+ prometheus.GathererFunc(parseText),
+ }
+
+ gathering, err := gatherers.Gather()
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ out := &bytes.Buffer{}
+ for _, mf := range gathering {
+ if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
+ panic(err)
+ }
+ }
+ fmt.Print(out.String())
+ fmt.Println("----------")
+
+ // Note how the temperature_kelvin metric family has been merged from
+ // different sources. Now try
+ text = `
+# TYPE humidity_percent gauge
+# HELP humidity_percent Humidity in %.
+humidity_percent{location="outside"} 45.4
+humidity_percent{location="inside"} 33.2
+# TYPE temperature_kelvin gauge
+# HELP temperature_kelvin Temperature in Kelvin.
+# Duplicate metric:
+temperature_kelvin{location="outside"} 265.3
+ # Wrong labels:
+temperature_kelvin 4.5
+`
+
+ gathering, err = gatherers.Gather()
+ if err != nil {
+ fmt.Println(err)
+ }
+ // Note that still as many metrics as possible are returned:
+ out.Reset()
+ for _, mf := range gathering {
+ if _, err := expfmt.MetricFamilyToText(out, mf); err != nil {
+ panic(err)
+ }
+ }
+ fmt.Print(out.String())
+
+ // Output:
+ // # HELP humidity_percent Humidity in %.
+ // # TYPE humidity_percent gauge
+ // humidity_percent{location="inside"} 33.2
+ // humidity_percent{location="outside"} 45.4
+ // # HELP temperature_kelvin Temperature in Kelvin.
+ // # TYPE temperature_kelvin gauge
+ // temperature_kelvin{location="inside"} 298.44
+ // temperature_kelvin{location="outside"} 273.14
+ // temperature_kelvin{location="somewhere else"} 4.5
+ // ----------
+ // 2 error(s) occurred:
+ // * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
+ // * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
+ // # HELP humidity_percent Humidity in %.
+ // # TYPE humidity_percent gauge
+ // humidity_percent{location="inside"} 33.2
+ // humidity_percent{location="outside"} 45.4
+ // # HELP temperature_kelvin Temperature in Kelvin.
+ // # TYPE temperature_kelvin gauge
+ // temperature_kelvin{location="inside"} 298.44
+ // temperature_kelvin{location="outside"} 273.14
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 00000000..18a99d5f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
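+
+// A minimal sketch of an exports map (all names here are hypothetical): a
+// flat expvar number and a one-label expvar map could be exported as
+//
+// NewExpvarCollector(map[string]*Desc{
+// "uptime-seconds": NewDesc("expvar_uptime_seconds", "Uptime as exported via expvar.", nil, nil),
+// "requests-by-method": NewDesc("expvar_requests_total", "Requests partitioned by HTTP method.", []string{"method"}, nil),
+// })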
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
new file mode 100644
index 00000000..910dac32
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus_test
+
+import (
+ "expvar"
+ "fmt"
+ "sort"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func ExampleNewExpvarCollector() {
+ expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
+ "memstats": prometheus.NewDesc(
+ "expvar_memstats",
+ "All numeric memstats as one metric family. Not a good role-model, actually... ;-)",
+ []string{"type"}, nil,
+ ),
+ "lone-int": prometheus.NewDesc(
+ "expvar_lone_int",
+ "Just an expvar int as an example.",
+ nil, nil,
+ ),
+ "http-request-map": prometheus.NewDesc(
+ "expvar_http_request_total",
+ "How many http requests processed, partitioned by status code and http method.",
+ []string{"code", "method"}, nil,
+ ),
+ })
+ prometheus.MustRegister(expvarCollector)
+
+ // The Prometheus part is done here. But to show that this example is
+ // doing anything, we have to manually export something via expvar. In
+ // real-life use-cases, some library would already have exported via
+ // expvar what we want to re-export as Prometheus metrics.
+ expvar.NewInt("lone-int").Set(42)
+ expvarMap := expvar.NewMap("http-request-map")
+ var (
+ expvarMap1, expvarMap2 expvar.Map
+ expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int
+ )
+ expvarMap1.Init()
+ expvarMap2.Init()
+ expvarInt11.Set(3)
+ expvarInt12.Set(13)
+ expvarInt21.Set(11)
+ expvarInt22.Set(212)
+ expvarMap1.Set("POST", &expvarInt11)
+ expvarMap1.Set("GET", &expvarInt12)
+ expvarMap2.Set("POST", &expvarInt21)
+ expvarMap2.Set("GET", &expvarInt22)
+ expvarMap.Set("404", &expvarMap1)
+ expvarMap.Set("200", &expvarMap2)
+ // Results in the following expvar map:
+ // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}}
+
+ // Let's see what the scrape would yield, but exclude the memstats metrics.
+ metricStrings := []string{}
+ metric := dto.Metric{}
+ metricChan := make(chan prometheus.Metric)
+ go func() {
+ expvarCollector.Collect(metricChan)
+ close(metricChan)
+ }()
+ for m := range metricChan {
+ if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
+ metric.Reset()
+ m.Write(&metric)
+ metricStrings = append(metricStrings, metric.String())
+ }
+ }
+ sort.Strings(metricStrings)
+ for _, s := range metricStrings {
+ fmt.Println(strings.TrimRight(s, " "))
+ }
+ // Output:
+ // label:<name:"code" value:"200" > label:<name:"method" value:"GET" > untyped:<value:212 >
+ // label:<name:"code" value:"200" > label:<name:"method" value:"POST" > untyped:<value:11 >
+ // label:<name:"code" value:"404" > label:<name:"method" value:"GET" > untyped:<value:13 >
+ // label:<name:"code" value:"404" > label:<name:"method" value:"POST" > untyped:<value:3 >
+ // untyped:<value:42 >
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/src/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000..e3b67df8
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
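+
+// A minimal usage sketch (hypothetical inputs; the separator byte mirrors
+// how this package delimits label values when hashing):
+//
+// h := hashNew()
+// h = hashAdd(h, "http_requests_total")
+// h = hashAddByte(h, 255)
+// h = hashAdd(h, "GET")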
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000..9ab5a3d6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
+ Inc()
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
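+
+// Because collection may call the function concurrently, shared state behind
+// the callback must be guarded. A minimal sketch (mu and queueLength are
+// hypothetical package-level variables):
+//
+// var (
+// mu sync.Mutex
+// queueLength float64
+// )
+//
+// gf := NewGaugeFunc(
+// GaugeOpts{Name: "queue_length", Help: "Current length of the queue."},
+// func() float64 {
+// mu.Lock()
+// defer mu.Unlock()
+// return queueLength
+// },
+// )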
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge_test.go
new file mode 100644
index 00000000..8e5f002c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/gauge_test.go
@@ -0,0 +1,202 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "sync"
+ "testing"
+ "testing/quick"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func listenGaugeStream(vals, result chan float64, done chan struct{}) {
+ var sum float64
+outer:
+ for {
+ select {
+ case <-done:
+ close(vals)
+ for v := range vals {
+ sum += v
+ }
+ break outer
+ case v := <-vals:
+ sum += v
+ }
+ }
+ result <- sum
+ close(result)
+}
+
+func TestGaugeConcurrency(t *testing.T) {
+ it := func(n uint32) bool {
+ mutations := int(n % 10000)
+ concLevel := int(n%15 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sStream := make(chan float64, mutations*concLevel)
+ result := make(chan float64)
+ done := make(chan struct{})
+
+ go listenGaugeStream(sStream, result, done)
+ go func() {
+ end.Wait()
+ close(done)
+ }()
+
+ gge := NewGauge(GaugeOpts{
+ Name: "test_gauge",
+ Help: "no help can be found here",
+ })
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ for j := 0; j < mutations; j++ {
+ vals[j] = rand.Float64() - 0.5
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for _, v := range vals {
+ sStream <- v
+ gge.Add(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ start.Done()
+
+ if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ t.Fatalf("expected approx. %f, got %f", expected, got)
+ return false
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGaugeVecConcurrency(t *testing.T) {
+ it := func(n uint32) bool {
+ mutations := int(n % 10000)
+ concLevel := int(n%15 + 1)
+ vecLength := int(n%5 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sStreams := make([]chan float64, vecLength)
+ results := make([]chan float64, vecLength)
+ done := make(chan struct{})
+
+ for i := 0; i < vecLength; i++ {
+ sStreams[i] = make(chan float64, mutations*concLevel)
+ results[i] = make(chan float64)
+ go listenGaugeStream(sStreams[i], results[i], done)
+ }
+
+ go func() {
+ end.Wait()
+ close(done)
+ }()
+
+ gge := NewGaugeVec(
+ GaugeOpts{
+ Name: "test_gauge",
+ Help: "no help can be found here",
+ },
+ []string{"label"},
+ )
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ pick := make([]int, mutations)
+ for j := 0; j < mutations; j++ {
+ vals[j] = rand.Float64() - 0.5
+ pick[j] = rand.Intn(vecLength)
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for i, v := range vals {
+ sStreams[pick[i]] <- v
+ gge.WithLabelValues(string('A' + pick[i])).Add(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ start.Done()
+
+ for i := range sStreams {
+ if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {
+ t.Fatalf("expected approx. %f, got %f", expected, got)
+ return false
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGaugeFunc(t *testing.T) {
+ gf := NewGaugeFunc(
+ GaugeOpts{
+ Name: "test_name",
+ Help: "test help",
+ ConstLabels: Labels{"a": "1", "b": "2"},
+ },
+ func() float64 { return 3.1415 },
+ )
+
+ if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+
+ m := &dto.Metric{}
+ gf.Write(m)
+
+ if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > gauge:<value:3.1415 > `, m.String(); expected != got {
+ t.Errorf("expected %q, got %q", expected, got)
+ }
+}
+
+func TestGaugeSetCurrentTime(t *testing.T) {
+ g := NewGauge(GaugeOpts{
+ Name: "test_name",
+ Help: "test help",
+ })
+ g.SetToCurrentTime()
+ unixTime := float64(time.Now().Unix())
+
+ m := &dto.Metric{}
+ g.Write(m)
+
+ delta := unixTime - m.GetGauge().GetValue()
+ // This is just a smoke test to make sure SetToCurrentTime is not
+ // totally off. Tests with current time involved are hard...
+ if math.Abs(delta) > 5 {
+ t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 00000000..6dea674c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// go process.
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
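+
+// A minimal usage sketch: register the collector once at startup, after which
+// its metrics appear on every scrape.
+//
+// MustRegister(NewGoCollector())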
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provide description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go
new file mode 100644
index 00000000..9a8858cb
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/go_collector_test.go
@@ -0,0 +1,123 @@
+package prometheus
+
+import (
+ "runtime"
+ "testing"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestGoCollector(t *testing.T) {
+ var (
+ c = NewGoCollector()
+ ch = make(chan Metric)
+ waitc = make(chan struct{})
+ closec = make(chan struct{})
+ old = -1
+ )
+ defer close(closec)
+
+ go func() {
+ c.Collect(ch)
+ go func(c <-chan struct{}) {
+ <-c
+ }(closec)
+ <-waitc
+ c.Collect(ch)
+ }()
+
+ for {
+ select {
+ case metric := <-ch:
+ switch m := metric.(type) {
+ // Attention, this also catches Counter...
+ case Gauge:
+ pb := &dto.Metric{}
+ m.Write(pb)
+ if pb.GetGauge() == nil {
+ continue
+ }
+
+ if old == -1 {
+ old = int(pb.GetGauge().GetValue())
+ close(waitc)
+ continue
+ }
+
+ if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {
+ // TODO: This is flaky in highly concurrent situations.
+ t.Errorf("want 1 new goroutine, got %d", diff)
+ }
+
+ // GoCollector performs two sends per call, so we need
+ // to receive the second send as well to shut down cleanly.
+ <-ch
+ return
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("expected collect timed out")
+ }
+ }
+}
+
+func TestGCCollector(t *testing.T) {
+ var (
+ c = NewGoCollector()
+ ch = make(chan Metric)
+ waitc = make(chan struct{})
+ closec = make(chan struct{})
+ oldGC uint64
+ oldPause float64
+ )
+ defer close(closec)
+
+ go func() {
+ c.Collect(ch)
+ // force GC
+ runtime.GC()
+ <-waitc
+ c.Collect(ch)
+ }()
+
+ first := true
+ for {
+ select {
+ case metric := <-ch:
+ switch m := metric.(type) {
+ case *constSummary, *value:
+ pb := &dto.Metric{}
+ m.Write(pb)
+ if pb.GetSummary() == nil {
+ continue
+ }
+
+ if len(pb.GetSummary().Quantile) != 5 {
+ t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile))
+ }
+ for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {
+ if *pb.GetSummary().Quantile[idx].Quantile != want {
+ t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want)
+ }
+ }
+ if first {
+ first = false
+ oldGC = *pb.GetSummary().SampleCount
+ oldPause = *pb.GetSummary().SampleSum
+ close(waitc)
+ continue
+ }
+ if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {
+ t.Errorf("want 1 new garbage collection run, got %d", diff)
+ }
+ if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {
+ t.Errorf("want moar pause, got %f", diff)
+ }
+ return
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("expected collect timed out")
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge.go b/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge.go
new file mode 100644
index 00000000..11533374
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge.go
@@ -0,0 +1,280 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package graphite provides a bridge to push Prometheus metrics to a Graphite
+// server.
+package graphite
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ defaultInterval = 15 * time.Second
+ millisecondsPerSecond = 1000
+)
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Ignore errors and try to push as many metrics to Graphite as possible.
+ ContinueOnError HandlerErrorHandling = iota
+
+ // Abort the push to Graphite upon the first error encountered.
+ AbortOnError
+)
+
+// Config defines the Graphite bridge config.
+type Config struct {
+ // The URL to push data to. Required.
+ URL string
+
+ // The prefix for the pushed Graphite metrics. Defaults to empty string.
+ Prefix string
+
+ // The interval to use for pushing data to Graphite. Defaults to 15 seconds.
+ Interval time.Duration
+
+ // The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
+ Timeout time.Duration
+
+ // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
+ Gatherer prometheus.Gatherer
+
+ // The logger that messages are written to. Defaults to no logging.
+ Logger Logger
+
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided the Logger
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+}
+
+// Bridge pushes metrics to the configured Graphite server.
+type Bridge struct {
+ url string
+ prefix string
+ interval time.Duration
+ timeout time.Duration
+
+ errorHandling HandlerErrorHandling
+ logger Logger
+
+ g prometheus.Gatherer
+}
+
+// Logger is the minimal interface Bridge needs for logging. Note that
+// log.Logger from the standard library implements this interface, and custom
+// loggers can easily implement it if they don't already.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// NewBridge returns a pointer to a new Bridge struct.
+func NewBridge(c *Config) (*Bridge, error) {
+ b := &Bridge{}
+
+ if c.URL == "" {
+ return nil, errors.New("missing URL")
+ }
+ b.url = c.URL
+
+ if c.Gatherer == nil {
+ b.g = prometheus.DefaultGatherer
+ } else {
+ b.g = c.Gatherer
+ }
+
+ if c.Logger != nil {
+ b.logger = c.Logger
+ }
+
+ if c.Prefix != "" {
+ b.prefix = c.Prefix
+ }
+
+ var z time.Duration
+ if c.Interval == z {
+ b.interval = defaultInterval
+ } else {
+ b.interval = c.Interval
+ }
+
+ if c.Timeout == z {
+ b.timeout = defaultInterval
+ } else {
+ b.timeout = c.Timeout
+ }
+
+ b.errorHandling = c.ErrorHandling
+
+ return b, nil
+}
+
+// Run starts the event loop that pushes Prometheus metrics to Graphite at the
+// configured interval.
+func (b *Bridge) Run(ctx context.Context) {
+ ticker := time.NewTicker(b.interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if err := b.Push(); err != nil && b.logger != nil {
+ b.logger.Println("error pushing to Graphite:", err)
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// Push pushes Prometheus metrics to the configured Graphite server.
+func (b *Bridge) Push() error {
+ mfs, err := b.g.Gather()
+ if err != nil || len(mfs) == 0 {
+ switch b.errorHandling {
+ case AbortOnError:
+ return err
+ case ContinueOnError:
+ if b.logger != nil {
+ b.logger.Println("continue on error:", err)
+ }
+ default:
+ panic("unrecognized error handling value")
+ }
+ }
+
+ conn, err := net.DialTimeout("tcp", b.url, b.timeout)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ return writeMetrics(conn, mfs, b.prefix, model.Now())
+}
+
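+// writeMetrics writes every sample extracted from mfs to w in Graphite's
+// plaintext protocol, one "<path> <value> <timestamp>" line per sample. The
+// prefix is prepended to each metric path, and the millisecond timestamp is
+// converted to seconds.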
+func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
+ vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
+ Timestamp: now,
+ }, mfs...)
+ if err != nil {
+ return err
+ }
+
+ buf := bufio.NewWriter(w)
+ for _, s := range vec {
+ if err := writeSanitized(buf, prefix); err != nil {
+ return err
+ }
+ if err := buf.WriteByte('.'); err != nil {
+ return err
+ }
+ if err := writeMetric(buf, s.Metric); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
+ return err
+ }
+ if err := buf.Flush(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
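+// writeMetric writes the Graphite path for a single sample: the sanitized
+// metric name followed by each "label value" pair, sorted and joined as path
+// components. For example, a sample of name{code="404"} is rendered as
+// "name.code.404".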
+func writeMetric(buf *bufio.Writer, m model.Metric) error {
+ metricName, hasName := m[model.MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != model.MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
+ }
+ }
+
+ var err error
+ switch numLabels {
+ case 0:
+ if hasName {
+ return writeSanitized(buf, string(metricName))
+ }
+ default:
+ sort.Strings(labelStrings)
+ if err = writeSanitized(buf, string(metricName)); err != nil {
+ return err
+ }
+ for _, s := range labelStrings {
+ if err = buf.WriteByte('.'); err != nil {
+ return err
+ }
+ if err = writeSanitized(buf, s); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func writeSanitized(buf *bufio.Writer, s string) error {
+ prevUnderscore := false
+
+ for _, c := range s {
+ c = replaceInvalidRune(c)
+ if c == '_' {
+ if prevUnderscore {
+ continue
+ }
+ prevUnderscore = true
+ } else {
+ prevUnderscore = false
+ }
+ if _, err := buf.WriteRune(c); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
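+// replaceInvalidRune maps runes that are not valid in a Graphite path
+// component: spaces become path separators ('.'), and any other rune outside
+// [a-zA-Z0-9_:] becomes '_'.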
+func replaceInvalidRune(c rune) rune {
+ if c == ' ' {
+ return '.'
+ }
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
+ return '_'
+ }
+ return c
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go
new file mode 100644
index 00000000..c2b274c6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go
@@ -0,0 +1,309 @@
+package graphite
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "log"
+ "net"
+ "os"
+ "regexp"
+ "testing"
+ "time"
+
+ "github.com/prometheus/common/model"
+ "golang.org/x/net/context"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestSanitize(t *testing.T) {
+ testCases := []struct {
+ in, out string
+ }{
+ {in: "hello", out: "hello"},
+ {in: "hE/l1o", out: "hE_l1o"},
+ {in: "he,*ll(.o", out: "he_ll_o"},
+ {in: "hello_there%^&", out: "hello_there_"},
+ }
+
+ var buf bytes.Buffer
+ w := bufio.NewWriter(&buf)
+
+ for i, tc := range testCases {
+ if err := writeSanitized(w, tc.in); err != nil {
+ t.Fatalf("write failed: %v", err)
+ }
+ if err := w.Flush(); err != nil {
+ t.Fatalf("flush failed: %v", err)
+ }
+
+ if want, got := tc.out, buf.String(); want != got {
+ t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
+ }
+
+ buf.Reset()
+ }
+}
+
+func TestWriteSummary(t *testing.T) {
+ sumVec := prometheus.NewSummaryVec(
+ prometheus.SummaryOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"labelname"},
+ )
+
+ sumVec.WithLabelValues("val1").Observe(float64(10))
+ sumVec.WithLabelValues("val1").Observe(float64(20))
+ sumVec.WithLabelValues("val1").Observe(float64(30))
+ sumVec.WithLabelValues("val2").Observe(float64(20))
+ sumVec.WithLabelValues("val2").Observe(float64(30))
+ sumVec.WithLabelValues("val2").Observe(float64(40))
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(sumVec)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
+prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1 3 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
+prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2 3 1477043
+`
+
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestWriteHistogram(t *testing.T) {
+ histVec := prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ Buckets: []float64{0.01, 0.02, 0.05, 0.1},
+ },
+ []string{"labelname"},
+ )
+
+ histVec.WithLabelValues("val1").Observe(float64(10))
+ histVec.WithLabelValues("val1").Observe(float64(20))
+ histVec.WithLabelValues("val1").Observe(float64(30))
+ histVec.WithLabelValues("val2").Observe(float64(20))
+ histVec.WithLabelValues("val2").Observe(float64(30))
+ histVec.WithLabelValues("val2").Observe(float64(40))
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(histVec)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
+`
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestToReader(t *testing.T) {
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(cntVec)
+
+ want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
+prefix.name.constname.constvalue.labelname.val2 1 1477043
+`
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ now := model.Time(1477043083)
+ var buf bytes.Buffer
+ err = writeMetrics(&buf, mfs, "prefix", now)
+ if err != nil {
+ t.Fatalf("error: %v", err)
+ }
+
+ if got := buf.String(); want != got {
+ t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+ }
+}
+
+func TestPush(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+ reg.MustRegister(cntVec)
+
+ host := "localhost"
+ port := ":56789"
+ b, err := NewBridge(&Config{
+ URL: host + port,
+ Gatherer: reg,
+ Prefix: "prefix",
+ })
+ if err != nil {
+ t.Fatalf("error creating bridge: %v", err)
+ }
+
+ nmg, err := newMockGraphite(port)
+ if err != nil {
+ t.Fatalf("error creating mock graphite: %v", err)
+ }
+ defer nmg.Close()
+
+ err = b.Push()
+ if err != nil {
+ t.Fatalf("error pushing: %v", err)
+ }
+
+ wants := []string{
+ "prefix.name.constname.constvalue.labelname.val1 1",
+ "prefix.name.constname.constvalue.labelname.val2 1",
+ }
+
+ select {
+ case got := <-nmg.readc:
+ for _, want := range wants {
+ matched, err := regexp.MatchString(want, got)
+ if err != nil {
+ t.Fatalf("error pushing: %v", err)
+ }
+ if !matched {
+ t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
+ }
+ }
+ return
+ case err := <-nmg.errc:
+ t.Fatalf("error reading push: %v", err)
+ case <-time.After(50 * time.Millisecond):
+ t.Fatalf("no result from graphite server")
+ }
+}
+
+func newMockGraphite(port string) (*mockGraphite, error) {
+ readc := make(chan string)
+ errc := make(chan error)
+ ln, err := net.Listen("tcp", port)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ conn, err := ln.Accept()
+ if err != nil {
+ errc <- err
+ return
+ }
+ var b bytes.Buffer
+ io.Copy(&b, conn)
+ readc <- b.String()
+ }()
+
+ return &mockGraphite{
+ readc: readc,
+ errc: errc,
+ Listener: ln,
+ }, nil
+}
+
+type mockGraphite struct {
+ readc chan string
+ errc chan error
+
+ net.Listener
+}
+
+func ExampleBridge() {
+ b, err := NewBridge(&Config{
+ URL: "graphite.example.org:3099",
+ Gatherer: prometheus.DefaultGatherer,
+ Prefix: "prefix",
+ Interval: 15 * time.Second,
+ Timeout: 10 * time.Second,
+ ErrorHandling: AbortOnError,
+ Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ go func() {
+ // Start something in a goroutine that uses metrics.
+ }()
+
+ // Push initial metrics to Graphite. Fail fast if the push fails.
+ if err := b.Push(); err != nil {
+ panic(err)
+ }
+
+ // Create a Context to control stopping the Run() loop that pushes
+ // metrics to Graphite.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Start pushing metrics to Graphite in the Run() loop.
+ b.Run(ctx)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 00000000..9719e8fa
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,444 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
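+//
+// A minimal usage sketch (the metric name and observed value are
+// illustrative):
+//
+// reqDur := NewHistogram(HistogramOpts{
+// Name: "request_duration_seconds",
+// Help: "A histogram of request latencies in seconds.",
+// })
+// reqDur.Observe(0.42) // count one request that took 0.42s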
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
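+//
+// For example, LinearBuckets(0, 5, 3) returns []float64{0, 5, 10}.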
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
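+//
+// For example, ExponentialBuckets(1, 2, 4) returns []float64{1, 2, 4, 8}.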
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+ // name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Finally we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ selfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: linear 38.3 ns/op, binary 48.7 ns/op
+ // 100 buckets: linear 78.1 ns/op, binary 54.9 ns/op
+ // 300 buckets: linear 154 ns/op, binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
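+ // Atomically add v to the float64 sum: reload the current bit pattern,
+ // add v, and retry with compare-and-swap until no concurrent Observe
+ // has intervened.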
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// and sent to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
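+//
+// A hypothetical sketch of use inside a custom Collector's Collect method,
+// with made-up counts:
+//
+// ch <- MustNewConstHistogram(
+// desc, 4711, 403.34,
+// map[float64]uint64{25: 121, 50: 2403, 100: 4233},
+// )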
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram_test.go
new file mode 100644
index 00000000..a73b1af1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/histogram_test.go
@@ -0,0 +1,348 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "testing/quick"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func benchmarkHistogramObserve(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewHistogram(HistogramOpts{})
+
+ for i := 0; i < w; i++ {
+ go func() {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Observe(float64(i))
+ }
+
+ wg.Done()
+ }()
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkHistogramObserve1(b *testing.B) {
+ benchmarkHistogramObserve(1, b)
+}
+
+func BenchmarkHistogramObserve2(b *testing.B) {
+ benchmarkHistogramObserve(2, b)
+}
+
+func BenchmarkHistogramObserve4(b *testing.B) {
+ benchmarkHistogramObserve(4, b)
+}
+
+func BenchmarkHistogramObserve8(b *testing.B) {
+ benchmarkHistogramObserve(8, b)
+}
+
+func benchmarkHistogramWrite(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewHistogram(HistogramOpts{})
+
+ for i := 0; i < 1000000; i++ {
+ s.Observe(float64(i))
+ }
+
+ for j := 0; j < w; j++ {
+ outs := make([]dto.Metric, b.N)
+
+ go func(o []dto.Metric) {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Write(&o[i])
+ }
+
+ wg.Done()
+ }(outs)
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkHistogramWrite1(b *testing.B) {
+ benchmarkHistogramWrite(1, b)
+}
+
+func BenchmarkHistogramWrite2(b *testing.B) {
+ benchmarkHistogramWrite(2, b)
+}
+
+func BenchmarkHistogramWrite4(b *testing.B) {
+ benchmarkHistogramWrite(4, b)
+}
+
+func BenchmarkHistogramWrite8(b *testing.B) {
+ benchmarkHistogramWrite(8, b)
+}
+
+func TestHistogramNonMonotonicBuckets(t *testing.T) {
+ testCases := map[string][]float64{
+ "not strictly monotonic": {1, 2, 2, 3},
+ "not monotonic at all": {1, 2, 4, 3, 5},
+ "have +Inf in the middle": {1, 2, math.Inf(+1), 3},
+ }
+ for name, buckets := range testCases {
+ func() {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name)
+ }
+ }()
+ _ = NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: buckets,
+ })
+ }()
+ }
+}
+
+// Intentionally adding +Inf here to test if that case is handled correctly.
+// Also, getCumulativeCounts depends on it.
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}
+
+func TestHistogramConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%5 + 1)
+ total := mutations * concLevel
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sum := NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: testBuckets,
+ })
+
+ allVars := make([]float64, total)
+ var sampleSum float64
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ allVars[i*mutations+j] = v
+ sampleSum += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for _, v := range vals {
+ sum.Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ sort.Float64s(allVars)
+ start.Done()
+ end.Wait()
+
+ m := &dto.Metric{}
+ sum.Write(m)
+ if got, want := int(*m.Histogram.SampleCount), total; got != want {
+ t.Errorf("got sample count %d, want %d", got, want)
+ }
+ if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f, want %f", got, want)
+ }
+
+ wantCounts := getCumulativeCounts(allVars)
+
+ if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
+ t.Errorf("got %d buckets in protobuf, want %d", got, want)
+ }
+ for i, wantBound := range testBuckets {
+ if i == len(testBuckets)-1 {
+ break // No +Inf bucket in protobuf.
+ }
+ if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {
+ t.Errorf("got bound %f, want %f", gotBound, wantBound)
+ }
+ if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {
+ t.Errorf("got count %d, want %d", gotCount, wantCount)
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestHistogramVecConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ objectives := make([]float64, 0, len(DefObjectives))
+ for qu := range DefObjectives {
+ objectives = append(objectives, qu)
+ }
+ sort.Float64s(objectives)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%7 + 1)
+ vecLength := int(n%3 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ his := NewHistogramVec(
+ HistogramOpts{
+ Name: "test_histogram",
+ Help: "helpless",
+ Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},
+ },
+ []string{"label"},
+ )
+
+ allVars := make([][]float64, vecLength)
+ sampleSums := make([]float64, vecLength)
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ picks := make([]int, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ pick := rand.Intn(vecLength)
+ picks[j] = pick
+ allVars[pick] = append(allVars[pick], v)
+ sampleSums[pick] += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for i, v := range vals {
+ his.WithLabelValues(string('A' + picks[i])).Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ for _, vars := range allVars {
+ sort.Float64s(vars)
+ }
+ start.Done()
+ end.Wait()
+
+ for i := 0; i < vecLength; i++ {
+ m := &dto.Metric{}
+ s := his.WithLabelValues(string('A' + i))
+ s.Write(m)
+
+ if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {
+ t.Errorf("got %d buckets in protobuf, want %d", got, want)
+ }
+ if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {
+ t.Errorf("got sample count %d, want %d", got, want)
+ }
+ if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f, want %f", got, want)
+ }
+
+ wantCounts := getCumulativeCounts(allVars[i])
+
+ for j, wantBound := range testBuckets {
+ if j == len(testBuckets)-1 {
+ break // No +Inf bucket in protobuf.
+ }
+ if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {
+ t.Errorf("got bound %f, want %f", gotBound, wantBound)
+ }
+ if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {
+ t.Errorf("got count %d, want %d", gotCount, wantCount)
+ }
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
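+// getCumulativeCounts returns, for each bucket in testBuckets, how many of
+// the given values are less than or equal to that bucket's upper bound.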
+func getCumulativeCounts(vars []float64) []uint64 {
+ counts := make([]uint64, len(testBuckets))
+ for _, v := range vars {
+ for i := len(testBuckets) - 1; i >= 0; i-- {
+ if v > testBuckets[i] {
+ break
+ }
+ counts[i]++
+ }
+ }
+ return counts
+}
+
+func TestBuckets(t *testing.T) {
+ got := LinearBuckets(-15, 5, 6)
+ want := []float64{-15, -10, -5, 0, 5, 10}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("linear buckets: got %v, want %v", got, want)
+ }
+
+ got = ExponentialBuckets(100, 1.2, 3)
+ want = []float64{100, 120, 144}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("linear buckets: got %v, want %v", got, want)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..d74fb488
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,526 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
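+// bufPool recycles the buffers used to assemble metric responses, avoiding
+// an allocation per scrape.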
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
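+// nowSeries returns a nower whose Now method returns the given times in
+// sequence, one per call. Tests use it to simulate the passage of time.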
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example for a handler
+// performing such writes.
+//
+// - It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
+//
+// Upcoming versions of this package will provide ways of instrumenting HTTP
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := computeApproximateRequestSize(r)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+ // Get the URL length in the current goroutine to avoid a race
+ // condition: a HandlerFunc running in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ out := make(chan int, 1)
+
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/http_test.go
new file mode 100644
index 00000000..7fd4077b
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/http_test.go
@@ -0,0 +1,154 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+type respBody string
+
+func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusTeapot)
+ w.Write([]byte(b))
+}
+
+func TestInstrumentHandler(t *testing.T) {
+ defer func(n nower) {
+ now = n
+ }(now)
+
+ instant := time.Now()
+ end := instant.Add(30 * time.Second)
+ now = nowSeries(instant, end)
+ respBody := respBody("Howdy there!")
+
+ hndlr := InstrumentHandler("test-handler", respBody)
+
+ opts := SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": "test-handler"},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ }
+
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+ err := Register(reqCnt)
+ if err == nil {
+ t.Fatal("expected reqCnt to be registered already")
+ }
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ t.Fatal("unexpected registration error:", err)
+ }
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+ err = Register(reqDur)
+ if err == nil {
+ t.Fatal("expected reqDur to be registered already")
+ }
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ t.Fatal("unexpected registration error:", err)
+ }
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+ err = Register(reqSz)
+ if err == nil {
+ t.Fatal("expected reqSz to be registered already")
+ }
+ if _, ok := err.(AlreadyRegisteredError); !ok {
+ t.Fatal("unexpected registration error:", err)
+ }
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+ err = Register(resSz)
+ if err == nil {
+ t.Fatal("expected resSz to be registered already")
+ }
+ if _, ok := err.(AlreadyRegisteredError); !ok {
+ t.Fatal("unexpected registration error:", err)
+ }
+
+ reqCnt.Reset()
+
+ resp := httptest.NewRecorder()
+ req := &http.Request{
+ Method: "GET",
+ }
+
+ hndlr.ServeHTTP(resp, req)
+
+ if resp.Code != http.StatusTeapot {
+ t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
+ }
+ if resp.Body.String() != "Howdy there!" {
+ t.Fatalf("expected body %s, got %s", "Howdy there!", resp.Body.String())
+ }
+
+ out := &dto.Metric{}
+ reqDur.Write(out)
+ if want, got := "test-handler", out.Label[0].GetValue(); want != got {
+ t.Errorf("want label value %q in reqDur, got %q", want, got)
+ }
+ if want, got := uint64(1), out.Summary.GetSampleCount(); want != got {
+ t.Errorf("want sample count %d in reqDur, got %d", want, got)
+ }
+
+ out.Reset()
+ if want, got := 1, len(reqCnt.children); want != got {
+ t.Errorf("want %d children in reqCnt, got %d", want, got)
+ }
+ cnt, err := reqCnt.GetMetricWithLabelValues("get", "418")
+ if err != nil {
+ t.Fatal(err)
+ }
+ cnt.Write(out)
+ if want, got := "418", out.Label[0].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if want, got := "test-handler", out.Label[1].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if want, got := "get", out.Label[2].GetValue(); want != got {
+ t.Errorf("want label value %q in reqCnt, got %q", want, got)
+ }
+ if out.Counter == nil {
+ t.Fatal("expected non-nil counter in reqCnt")
+ }
+ if want, got := 1., out.Counter.GetValue(); want != got {
+ t.Errorf("want reqCnt of %f, got %f", want, got)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 00000000..d4063d98
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
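
The Write contract above recommends lexicographically sorted labels and points at LabelPairSorter for that purpose. A minimal sketch of its use in a custom Metric implementation, written as if inside this package (the helper name is an assumption):

	package prometheus

	import (
		"sort"

		dto "github.com/prometheus/client_model/go"
	)

	// writeSortedLabels is a hypothetical helper for custom Write
	// implementations: it sorts label pairs by name before attaching them.
	func writeSortedLabels(out *dto.Metric, pairs []*dto.LabelPair) {
		sort.Sort(LabelPairSorter(pairs)) // lexicographic by GetName()
		out.Label = pairs
	}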
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/metric_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/metric_test.go
new file mode 100644
index 00000000..7145f5e5
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/metric_test.go
@@ -0,0 +1,35 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "testing"
+
+func TestBuildFQName(t *testing.T) {
+ scenarios := []struct{ namespace, subsystem, name, result string }{
+ {"a", "b", "c", "a_b_c"},
+ {"", "b", "c", "b_c"},
+ {"a", "", "c", "a_c"},
+ {"", "", "c", "c"},
+ {"a", "b", "", ""},
+ {"a", "", "", ""},
+ {"", "b", "", ""},
+ {" ", "", "", ""},
+ }
+
+ for i, s := range scenarios {
+ if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {
+ t.Errorf("%d. want %s, got %s", i, want, got)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 00000000..94b2553e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,140 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, rss *Desc
+ startTime *Desc
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including cpu, memory and file descriptor usage as well as
+// the process start time for the given process id under the given namespace.
+func NewProcessCollector(pid int, namespace string) Collector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ }
+}
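
A minimal sketch of wiring this collector up for the current process on a dedicated registry (the "myapp" namespace is an assumption for illustration); the test below does essentially the same:

	package main

	import (
		"os"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		reg := prometheus.NewRegistry()
		reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))
		// reg now gathers metrics such as myapp_process_open_fds on
		// platforms where procfs is available; elsewhere, as the code
		// above shows, Collect is a silent no-op.
	}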
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go
new file mode 100644
index 00000000..c7acb47f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/process_collector_test.go
@@ -0,0 +1,58 @@
+package prometheus
+
+import (
+ "bytes"
+ "os"
+ "regexp"
+ "testing"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/procfs"
+)
+
+func TestProcessCollector(t *testing.T) {
+ if _, err := procfs.Self(); err != nil {
+ t.Skipf("skipping TestProcessCollector, procfs not available: %s", err)
+ }
+
+ registry := NewRegistry()
+ if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
+ t.Fatal(err)
+ }
+ if err := registry.Register(NewProcessCollectorPIDFn(
+ func() (int, error) { return os.Getpid(), nil }, "foobar"),
+ ); err != nil {
+ t.Fatal(err)
+ }
+
+ mfs, err := registry.Gather()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ for _, mf := range mfs {
+ if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ for _, re := range []*regexp.Regexp{
+ regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("\nprocess_max_fds [1-9]"),
+ regexp.MustCompile("\nprocess_open_fds [1-9]"),
+ regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
+ regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
+ regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
+ regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
+ regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
+ regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
+ regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
+ } {
+ if !re.Match(buf.Bytes()) {
+ t.Errorf("want body to match %s\n%s", re, buf.String())
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 00000000..b6dd5a26
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package promhttp contains functions to create http.Handler instances to
+// expose Prometheus metrics via HTTP. In later versions of this package, it
+// will also contain tooling to instrument instances of http.Handler and
+// http.RoundTripper.
+//
+// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
+// you can create a handler for a custom registry or anything that implements
+// the Gatherer interface. It also allows creating handlers that act
+// differently on errors or that log errors.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler.
+// The zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
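
A minimal sketch of HandlerFor serving a custom registry with best-effort error handling (listen address and log prefix are assumptions):

	package main

	import (
		"log"
		"net/http"
		"os"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		reg := prometheus.NewRegistry()
		reg.MustRegister(prometheus.NewGoCollector())

		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
			ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
			ErrorHandling: promhttp.ContinueOnError, // serve what was gathered, log the rest
		}))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}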
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
new file mode 100644
index 00000000..d4a7d4a7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
@@ -0,0 +1,137 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package promhttp
+
+import (
+ "bytes"
+ "errors"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type errorCollector struct{}
+
+func (e errorCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil)
+}
+
+func (e errorCollector) Collect(ch chan<- prometheus.Metric) {
+ ch <- prometheus.NewInvalidMetric(
+ prometheus.NewDesc("invalid_metric", "not helpful", nil, nil),
+ errors.New("collect error"),
+ )
+}
+
+func TestHandlerErrorHandling(t *testing.T) {
+
+ // Create a registry that collects a MetricFamily with two elements,
+ // another with one, and reports an error.
+ reg := prometheus.NewRegistry()
+
+ cnt := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "the_count",
+ Help: "Ah-ah-ah! Thunder and lightning!",
+ })
+ reg.MustRegister(cnt)
+
+ cntVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+ cntVec.WithLabelValues("val1").Inc()
+ cntVec.WithLabelValues("val2").Inc()
+ reg.MustRegister(cntVec)
+
+ reg.MustRegister(errorCollector{})
+
+ logBuf := &bytes.Buffer{}
+ logger := log.New(logBuf, "", 0)
+
+ writer := httptest.NewRecorder()
+ request, _ := http.NewRequest("GET", "/", nil)
+ request.Header.Add("Accept", "test/plain")
+
+ errorHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: HTTPErrorOnError,
+ })
+ continueHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: ContinueOnError,
+ })
+ panicHandler := HandlerFor(reg, HandlerOpts{
+ ErrorLog: logger,
+ ErrorHandling: PanicOnError,
+ })
+ wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
+`
+ wantErrorBody := `An error has occurred during metrics gathering:
+
+error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error
+`
+ wantOKBody := `# HELP name docstring
+# TYPE name counter
+name{constname="constvalue",labelname="val1"} 1
+name{constname="constvalue",labelname="val2"} 1
+# HELP the_count Ah-ah-ah! Thunder and lightning!
+# TYPE the_count counter
+the_count 0
+`
+
+ errorHandler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusInternalServerError; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got := logBuf.String(); got != wantMsg {
+ t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
+ }
+ if got := writer.Body.String(); got != wantErrorBody {
+ t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
+ }
+ logBuf.Reset()
+ writer.Body.Reset()
+ writer.Code = http.StatusOK
+
+ continueHandler.ServeHTTP(writer, request)
+ if got, want := writer.Code, http.StatusOK; got != want {
+ t.Errorf("got HTTP status code %d, want %d", got, want)
+ }
+ if got := logBuf.String(); got != wantMsg {
+ t.Errorf("got log message %q, want %q", got, wantMsg)
+ }
+ if got := writer.Body.String(); got != wantOKBody {
+ t.Errorf("got body %q, want %q", got, wantOKBody)
+ }
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Error("expected panic from panicHandler")
+ }
+ }()
+ panicHandler.ServeHTTP(writer, request)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go
new file mode 100644
index 00000000..dd5c10a5
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package push_test
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/push"
+)
+
+var (
+ completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_completion_timestamp_seconds",
+ Help: "The timestamp of the last completion of a DB backup, successful or not.",
+ })
+ successTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_success_timestamp_seconds",
+ Help: "The timestamp of the last successful completion of a DB backup.",
+ })
+ duration = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_duration_seconds",
+ Help: "The duration of the last DB backup in seconds.",
+ })
+ records = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_records_processed",
+ Help: "The number of records processed in the last DB backup.",
+ })
+)
+
+func performBackup() (int, error) {
+ // Perform the backup and return the number of backed up records and any
+ // applicable error.
+ // ...
+ return 42, nil
+}
+
+func ExampleAddFromGatherer() {
+ registry := prometheus.NewRegistry()
+ registry.MustRegister(completionTime, duration, records)
+ // Note that successTime is not registered at this time.
+
+ start := time.Now()
+ n, err := performBackup()
+ records.Set(float64(n))
+ duration.Set(time.Since(start).Seconds())
+ completionTime.SetToCurrentTime()
+ if err != nil {
+ fmt.Println("DB backup failed:", err)
+ } else {
+ // Only now register successTime.
+ registry.MustRegister(successTime)
+ successTime.SetToCurrentTime()
+ }
+ // AddFromGatherer is used here rather than FromGatherer so that a
+ // previously pushed success timestamp is not deleted if this backup
+ // fails.
+ if err := push.AddFromGatherer(
+ "db_backup", nil,
+ "http://pushgateway:9091",
+ registry,
+ ); err != nil {
+ fmt.Println("Could not push to Pushgateway:", err)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/push/examples_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/push/examples_test.go
new file mode 100644
index 00000000..7e0ac66a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/push/examples_test.go
@@ -0,0 +1,36 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package push_test
+
+import (
+ "fmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/push"
+)
+
+func ExampleCollectors() {
+ completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "db_backup_last_completion_timestamp_seconds",
+ Help: "The timestamp of the last successful completion of a DB backup.",
+ })
+ completionTime.SetToCurrentTime()
+ if err := push.Collectors(
+ "db_backup", push.HostnameGroupingKey(),
+ "http://pushgateway:9091",
+ completionTime,
+ ); err != nil {
+ fmt.Println("Could not push completion time to Pushgateway:", err)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/src/github.com/prometheus/client_golang/prometheus/push/push.go
new file mode 100644
index 00000000..8fb6f5f1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/push/push.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package push provides functions to push metrics to a Pushgateway. The metrics
+// to push are either collected from a provided registry, or from explicitly
+// listed collectors.
+//
+// See the documentation of the Pushgateway to understand the meaning of the
+// grouping parameters and the differences between push.FromGatherer and
+// push.Collectors on the one hand and push.AddFromGatherer and
+// push.AddCollectors on the other hand: https://github.com/prometheus/pushgateway
+package push
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/common/model"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const contentTypeHeader = "Content-Type"
+
+// FromGatherer triggers a metric collection by the provided Gatherer (which is
+// usually implemented by a prometheus.Registry) and pushes all gathered metrics
+// to the Pushgateway specified by url, using the provided job name and the
+// (optional) further grouping labels (the grouping map may be nil). See the
+// Pushgateway documentation for detailed implications of the job and other
+// grouping labels. Neither the job name nor any grouping label value may
+// contain a "/". The metrics pushed must not contain a job label of their own
+// nor any of the grouping labels.
+//
+// You can use just host:port or ip:port as url, in which case 'http://' is
+// added automatically. You can also include the scheme in the URL. However, do
+// not include the '/metrics/job/...' part.
+//
+// Note that all previously pushed metrics with the same job and other grouping
+// labels will be replaced with the metrics pushed by this call. (It uses HTTP
+// method 'PUT' to push to the Pushgateway.)
+func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
+ return push(job, grouping, url, g, "PUT")
+}
+
+// AddFromGatherer works like FromGatherer, but only previously pushed metrics
+// with the same name (and the same job and other grouping labels) will be
+// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
+func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
+ return push(job, grouping, url, g, "POST")
+}
+
+func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
+ if !strings.Contains(pushURL, "://") {
+ pushURL = "http://" + pushURL
+ }
+ pushURL = strings.TrimSuffix(pushURL, "/")
+
+ if strings.Contains(job, "/") {
+ return fmt.Errorf("job contains '/': %s", job)
+ }
+ urlComponents := []string{url.QueryEscape(job)}
+ for ln, lv := range grouping {
+ if !model.LabelName(ln).IsValid() {
+ return fmt.Errorf("grouping label has invalid name: %s", ln)
+ }
+ if strings.Contains(lv, "/") {
+ return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
+ }
+ urlComponents = append(urlComponents, ln, lv)
+ }
+ pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
+
+ mfs, err := g.Gather()
+ if err != nil {
+ return err
+ }
+ buf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+ // Check for pre-existing grouping labels:
+ for _, mf := range mfs {
+ for _, m := range mf.GetMetric() {
+ for _, l := range m.GetLabel() {
+ if l.GetName() == "job" {
+ return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
+ }
+ if _, ok := grouping[l.GetName()]; ok {
+ return fmt.Errorf(
+ "pushed metric %s (%s) already contains grouping label %s",
+ mf.GetName(), m, l.GetName(),
+ )
+ }
+ }
+ }
+ enc.Encode(mf)
+ }
+ req, err := http.NewRequest(method, pushURL, buf)
+ if err != nil {
+ return err
+ }
+ req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusAccepted {
+ body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
+ return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
+ }
+ return nil
+}
+
+// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
+// it collects from the provided collectors directly. It is a convenient way to
+// push only a few metrics.
+func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
+ return pushCollectors(job, grouping, url, "PUT", collectors...)
+}
+
+// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
+// Instead, it collects from the provided collectors directly. It is a
+// convenient way to push only a few metrics.
+func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
+ return pushCollectors(job, grouping, url, "POST", collectors...)
+}
+
+func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
+ r := prometheus.NewRegistry()
+ for _, collector := range collectors {
+ if err := r.Register(collector); err != nil {
+ return err
+ }
+ }
+ return push(job, grouping, url, r, method)
+}
+
+// HostnameGroupingKey returns a label map with the only entry
+// {instance=""}. This can be conveniently used as the grouping
+// parameter if metrics should be pushed with the hostname as label. The
+// returned map is created upon each call so that the caller is free to add more
+// labels to the map.
+func HostnameGroupingKey() map[string]string {
+ hostname, err := os.Hostname()
+ if err != nil {
+ return map[string]string{"instance": "unknown"}
+ }
+ return map[string]string{"instance": hostname}
+}
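
A minimal sketch of a batch job pushing a whole registry; the job name, grouping, and Pushgateway address are assumptions. Given the path construction above, this issues a PUT to <pushgateway>/metrics/job/backup/instance/<hostname>:

	package main

	import (
		"log"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/push"
	)

	func main() {
		reg := prometheus.NewRegistry()
		// ... register the job's metrics on reg ...
		if err := push.FromGatherer(
			"backup", push.HostnameGroupingKey(),
			"http://pushgateway:9091", reg,
		); err != nil {
			log.Println("push failed:", err)
		}
	}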
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/push/push_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/push/push_test.go
new file mode 100644
index 00000000..28ed9b74
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/push/push_test.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package push
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestPush(t *testing.T) {
+
+ var (
+ lastMethod string
+ lastBody []byte
+ lastPath string
+ )
+
+ host, err := os.Hostname()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Fake a Pushgateway that always responds with 202.
+ pgwOK := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ lastMethod = r.Method
+ var err error
+ lastBody, err = ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ lastPath = r.URL.EscapedPath()
+ w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
+ w.WriteHeader(http.StatusAccepted)
+ }),
+ )
+ defer pgwOK.Close()
+
+ // Fake a Pushgateway that always responds with 500.
+ pgwErr := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "fake error", http.StatusInternalServerError)
+ }),
+ )
+ defer pgwErr.Close()
+
+ metric1 := prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "testname1",
+ Help: "testhelp1",
+ })
+ metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "testname2",
+ Help: "testhelp2",
+ ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
+ })
+
+ reg := prometheus.NewRegistry()
+ reg.MustRegister(metric1)
+ reg.MustRegister(metric2)
+
+ mfs, err := reg.Gather()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ t.Fatal(err)
+ }
+ }
+ wantBody := buf.Bytes()
+
+ // Collectors, all good.
+ if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "PUT" {
+ t.Error("want method PUT for PushCollectors, got", lastMethod)
+ }
+ if !bytes.Equal(lastBody, wantBody) {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob/instance/"+host {
+ t.Error("unexpected path:", lastPath)
+ }
+
+ // AddCollectors, with nil grouping, all good.
+ if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "POST" {
+ t.Error("want method POST for PushAddCollectors, got", lastMethod)
+ }
+ if !bytes.Equal(lastBody, wantBody) {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob" {
+ t.Error("unexpected path:", lastPath)
+ }
+
+ // Collectors with a broken Pushgateway.
+ if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push to broken Pushgateway succeeded")
+ } else {
+ if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
+ t.Errorf("got error %q, want %q", got, want)
+ }
+ }
+
+ // Collectors with invalid grouping or job.
+ if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with grouping contained in metrics succeeded")
+ }
+ if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid job value succeeded")
+ }
+ if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid grouping succeeded")
+ }
+ if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil {
+ t.Error("push with invalid grouping succeeded")
+ }
+
+ // FromGatherer, all good.
+ if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "PUT" {
+ t.Error("want method PUT for Push, got", lastMethod)
+ }
+ if !bytes.Equal(lastBody, wantBody) {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+
+ // AddFromGatherer, all good.
+ if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil {
+ t.Fatal(err)
+ }
+ if lastMethod != "POST" {
+ t.Error("want method POSTT for PushAdd, got", lastMethod)
+ }
+ if !bytes.Equal(lastBody, wantBody) {
+ t.Errorf("got body %v, want %v", lastBody, wantBody)
+ }
+ if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
+ t.Error("unexpected path:", lastPath)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 00000000..78d5f193
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,755 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach of keeping default
+// instances as global state mirrors the approach of other packages in the Go
+// standard library. Note that there are caveats. Change the variables with
+// caution and only if you understand the consequences. Users who want to
+// avoid global state altogether should not use the convenience functions and
+// should act on custom instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
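+
+// For illustration, a hypothetical test of a custom Collector against a
+// pedantic registry (myCollector is an assumed name) could look like:
+//
+//	reg := NewPedanticRegistry()
+//	reg.MustRegister(myCollector)
+//	if _, err := reg.Gather(); err != nil {
+//		t.Fatal(err) // inconsistent Metric/Desc pairs surface here
+//	}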
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
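+
+// As a sketch, a custom Gatherer would typically use MaybeUnwrap like this:
+//
+//	var errs MultiError
+//	// ... append any per-metric collection errors to errs ...
+//	return mfs, errs.MaybeUnwrap() // nil, a single error, or the MultiError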
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
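+
+// A minimal usage sketch (not part of the upstream file; reg and w are
+// placeholders): encoding a Gather result in the text exposition format with
+// the separate expfmt package.
+//
+// mfs, err := reg.Gather()
+// // ... handle err ...
+// enc := expfmt.NewEncoder(w, expfmt.FmtText)
+// for _, mf := range mfs {
+// if err := enc.Encode(mf); err != nil {
+// // ... handle err ...
+// }
+// }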
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
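+
+// A minimal usage sketch (not part of the upstream file; myFamilies is a
+// placeholder): merging the default Gatherer with pre-existing MetricFamily
+// protobufs injected via a GathererFunc.
+//
+// merged := Gatherers{
+// DefaultGatherer,
+// GathererFunc(func() ([]*dto.MetricFamily, error) {
+// return myFamilies, nil
+// }),
+// }
+// mfs, err := merged.Gather()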
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ dh = hashAdd(dh, lp.GetName())
+ dh = hashAddByte(dh, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+ if dimHash != dh {
+ return fmt.Errorf(
+ "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ } else {
+ dimHashes[metricFamily.GetName()] = dh
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/registry_test.go
new file mode 100644
index 00000000..d016a156
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/registry_test.go
@@ -0,0 +1,546 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package prometheus_test
+
+import (
+ "bytes"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+func testHandler(t testing.TB) {
+
+ metricVec := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "name",
+ Help: "docstring",
+ ConstLabels: prometheus.Labels{"constname": "constvalue"},
+ },
+ []string{"labelname"},
+ )
+
+ metricVec.WithLabelValues("val1").Inc()
+ metricVec.WithLabelValues("val2").Inc()
+
+ externalMetricFamily := &dto.MetricFamily{
+ Name: proto.String("externalname"),
+ Help: proto.String("externaldocstring"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {
+ Name: proto.String("externalconstname"),
+ Value: proto.String("externalconstvalue"),
+ },
+ {
+ Name: proto.String("externallabelname"),
+ Value: proto.String("externalval1"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(1),
+ },
+ },
+ },
+ }
+ externalBuf := &bytes.Buffer{}
+ enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim)
+ if err := enc.Encode(externalMetricFamily); err != nil {
+ t.Fatal(err)
+ }
+ externalMetricFamilyAsBytes := externalBuf.Bytes()
+ externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring
+# TYPE externalname counter
+externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1
+`)
+ externalMetricFamilyAsProtoText := []byte(`name: "externalname"
+help: "externaldocstring"
+type: COUNTER
+metric: <
+ label: <
+ name: "externalconstname"
+ value: "externalconstvalue"
+ >
+ label: <
+ name: "externallabelname"
+ value: "externalval1"
+ >
+ counter: <
+ value: 1
+ >
+>
+
+`)
+ externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric:<label:<name:"externalconstname" value:"externalconstvalue" > label:<name:"externallabelname" value:"externalval1" > counter:<value:1 > > 
+`)
+
+ expectedMetricFamily := &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("docstring"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {
+ Name: proto.String("constname"),
+ Value: proto.String("constvalue"),
+ },
+ {
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(1),
+ },
+ },
+ {
+ Label: []*dto.LabelPair{
+ {
+ Name: proto.String("constname"),
+ Value: proto.String("constvalue"),
+ },
+ {
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(1),
+ },
+ },
+ },
+ }
+ buf := &bytes.Buffer{}
+ enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
+ if err := enc.Encode(expectedMetricFamily); err != nil {
+ t.Fatal(err)
+ }
+ expectedMetricFamilyAsBytes := buf.Bytes()
+ expectedMetricFamilyAsText := []byte(`# HELP name docstring
+# TYPE name counter
+name{constname="constvalue",labelname="val1"} 1
+name{constname="constvalue",labelname="val2"} 1
+`)
+ expectedMetricFamilyAsProtoText := []byte(`name: "name"
+help: "docstring"
+type: COUNTER
+metric: <
+ label: <
+ name: "constname"
+ value: "constvalue"
+ >
+ label: <
+ name: "labelname"
+ value: "val1"
+ >
+ counter: <
+ value: 1
+ >
+>
+metric: <
+ label: <
+ name: "constname"
+ value: "constvalue"
+ >
+ label: <
+ name: "labelname"
+ value: "val2"
+ >
+ counter: <
+ value: 1
+ >
+>
+
+`)
+ expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > 
+`)
+
+ externalMetricFamilyWithSameName := &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("docstring"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ {
+ Label: []*dto.LabelPair{
+ {
+ Name: proto.String("constname"),
+ Value: proto.String("constvalue"),
+ },
+ {
+ Name: proto.String("labelname"),
+ Value: proto.String("different_val"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(42),
+ },
+ },
+ },
+ }
+
+ expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val1" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"val2" > counter:<value:1 > > metric:<label:<name:"constname" value:"constvalue" > label:<name:"labelname" value:"different_val" > counter:<value:42 > > 
+`)
+
+ type output struct {
+ headers map[string]string
+ body []byte
+ }
+
+ var scenarios = []struct {
+ headers map[string]string
+ out output
+ collector prometheus.Collector
+ externalMF []*dto.MetricFamily
+ }{
+ { // 0
+ headers: map[string]string{
+ "Accept": "foo/bar;q=0.2, dings/bums;q=0.8",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: []byte{},
+ },
+ },
+ { // 1
+ headers: map[string]string{
+ "Accept": "foo/bar;q=0.2, application/quark;q=0.8",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: []byte{},
+ },
+ },
+ { // 2
+ headers: map[string]string{
+ "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: []byte{},
+ },
+ },
+ { // 3
+ headers: map[string]string{
+ "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
+ },
+ body: []byte{},
+ },
+ },
+ { // 4
+ headers: map[string]string{
+ "Accept": "application/json",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: expectedMetricFamilyAsText,
+ },
+ collector: metricVec,
+ },
+ { // 5
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
+ },
+ body: expectedMetricFamilyAsBytes,
+ },
+ collector: metricVec,
+ },
+ { // 6
+ headers: map[string]string{
+ "Accept": "application/json",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: externalMetricFamilyAsText,
+ },
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 7
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
+ },
+ body: externalMetricFamilyAsBytes,
+ },
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 8
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsBytes,
+ expectedMetricFamilyAsBytes,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 9
+ headers: map[string]string{
+ "Accept": "text/plain",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: []byte{},
+ },
+ },
+ { // 10
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: expectedMetricFamilyAsText,
+ },
+ collector: metricVec,
+ },
+ { // 11
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `text/plain; version=0.0.4`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsText,
+ expectedMetricFamilyAsText,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 12
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsBytes,
+ expectedMetricFamilyAsBytes,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 13
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsProtoText,
+ expectedMetricFamilyAsProtoText,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 14
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsProtoCompactText,
+ expectedMetricFamilyAsProtoCompactText,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{externalMetricFamily},
+ },
+ { // 15
+ headers: map[string]string{
+ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text",
+ },
+ out: output{
+ headers: map[string]string{
+ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,
+ },
+ body: bytes.Join(
+ [][]byte{
+ externalMetricFamilyAsProtoCompactText,
+ expectedMetricFamilyMergedWithExternalAsProtoCompactText,
+ },
+ []byte{},
+ ),
+ },
+ collector: metricVec,
+ externalMF: []*dto.MetricFamily{
+ externalMetricFamily,
+ externalMetricFamilyWithSameName,
+ },
+ },
+ }
+ for i, scenario := range scenarios {
+ registry := prometheus.NewPedanticRegistry()
+ gatherer := prometheus.Gatherer(registry)
+ if scenario.externalMF != nil {
+ gatherer = prometheus.Gatherers{
+ registry,
+ prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
+ return scenario.externalMF, nil
+ }),
+ }
+ }
+
+ if scenario.collector != nil {
+ registry.Register(scenario.collector)
+ }
+ writer := httptest.NewRecorder()
+ handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
+ request, _ := http.NewRequest("GET", "/", nil)
+ for key, value := range scenario.headers {
+ request.Header.Add(key, value)
+ }
+ handler(writer, request)
+
+ for key, value := range scenario.out.headers {
+ if writer.Header().Get(key) != value {
+ t.Errorf(
+ "%d. expected %q for header %q, got %q",
+ i, value, key, writer.Header().Get(key),
+ )
+ }
+ }
+
+ if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {
+ t.Errorf(
+ "%d. expected body:\n%s\ngot body:\n%s\n",
+ i, scenario.out.body, writer.Body.Bytes(),
+ )
+ }
+ }
+}
+
+func TestHandler(t *testing.T) {
+ testHandler(t)
+}
+
+func BenchmarkHandler(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testHandler(b)
+ }
+}
+
+func TestRegisterWithOrGet(t *testing.T) {
+ // Replace the default registerer just to be sure. This is bad, but this
+ // whole test will go away once RegisterOrGet is removed.
+ oldRegisterer := prometheus.DefaultRegisterer
+ defer func() {
+ prometheus.DefaultRegisterer = oldRegisterer
+ }()
+ prometheus.DefaultRegisterer = prometheus.NewRegistry()
+ original := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ equalButNotSame := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "test",
+ Help: "help",
+ },
+ []string{"foo", "bar"},
+ )
+ var err error
+ if err = prometheus.Register(original); err != nil {
+ t.Fatal(err)
+ }
+ if err = prometheus.Register(equalButNotSame); err == nil {
+ t.Fatal("expected error when registringe equal collector")
+ }
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ if are.ExistingCollector != original {
+ t.Error("expected original collector but got something else")
+ }
+ if are.ExistingCollector == equalButNotSame {
+ t.Error("expected original callector but got new one")
+ }
+ } else {
+ t.Error("unexpected error:", err)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 00000000..82b88501
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,543 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
+
+// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
+var (
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
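+
+// A minimal usage sketch (not part of the upstream file; the metric name is a
+// placeholder): setting Objectives explicitly, as recommended above, instead
+// of relying on the deprecated default.
+//
+// latency := NewSummary(SummaryOpts{
+// Name: "myapp_request_duration_seconds",
+// Help: "Request latency distribution.",
+// Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+// })
+// latency.Observe(0.21)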
+
+// A major problem with the sliding-window decay algorithm: the Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if opts.Objectives == nil {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
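+
+// A minimal usage sketch (not part of the upstream file; names are
+// placeholders): a SummaryVec partitioned by HTTP method.
+//
+// opLatency := NewSummaryVec(
+// SummaryOpts{
+// Name: "myapp_op_duration_seconds",
+// Help: "Operation latency, partitioned by method.",
+// Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
+// },
+// []string{"method"},
+// )
+// opLatency.WithLabelValues("GET").Observe(0.042)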
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
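+
+// A minimal usage sketch (not part of the upstream file; statsCollector and
+// its desc field are hypothetical): a custom Collector emitting a const
+// summary, e.g. to mirror pre-aggregated stats from another system, in its
+// Collect method.
+//
+// func (c *statsCollector) Collect(ch chan<- Metric) {
+// ch <- MustNewConstSummary(
+// c.desc,
+// 42, 10.5,
+// map[float64]float64{0.5: 0.2, 0.99: 0.9},
+// )
+// }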
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/summary_test.go
new file mode 100644
index 00000000..4be59b5a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/summary_test.go
@@ -0,0 +1,388 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "testing"
+ "testing/quick"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestSummaryWithDefaultObjectives(t *testing.T) {
+ reg := NewRegistry()
+ summaryWithDefaultObjectives := NewSummary(SummaryOpts{
+ Name: "default_objectives",
+ Help: "Test help.",
+ })
+ if err := reg.Register(summaryWithDefaultObjectives); err != nil {
+ t.Error(err)
+ }
+
+ m := &dto.Metric{}
+ if err := summaryWithDefaultObjectives.Write(m); err != nil {
+ t.Error(err)
+ }
+ if len(m.GetSummary().Quantile) != len(DefObjectives) {
+ t.Error("expected default objectives in summary")
+ }
+}
+
+func TestSummaryWithoutObjectives(t *testing.T) {
+ reg := NewRegistry()
+ summaryWithEmptyObjectives := NewSummary(SummaryOpts{
+ Name: "empty_objectives",
+ Help: "Test help.",
+ Objectives: map[float64]float64{},
+ })
+ if err := reg.Register(summaryWithEmptyObjectives); err != nil {
+ t.Error(err)
+ }
+
+ m := &dto.Metric{}
+ if err := summaryWithEmptyObjectives.Write(m); err != nil {
+ t.Error(err)
+ }
+ if len(m.GetSummary().Quantile) != 0 {
+ t.Error("expected no objectives in summary")
+ }
+}
+
+func benchmarkSummaryObserve(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewSummary(SummaryOpts{})
+
+ for i := 0; i < w; i++ {
+ go func() {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Observe(float64(i))
+ }
+
+ wg.Done()
+ }()
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkSummaryObserve1(b *testing.B) {
+ benchmarkSummaryObserve(1, b)
+}
+
+func BenchmarkSummaryObserve2(b *testing.B) {
+ benchmarkSummaryObserve(2, b)
+}
+
+func BenchmarkSummaryObserve4(b *testing.B) {
+ benchmarkSummaryObserve(4, b)
+}
+
+func BenchmarkSummaryObserve8(b *testing.B) {
+ benchmarkSummaryObserve(8, b)
+}
+
+func benchmarkSummaryWrite(w int, b *testing.B) {
+ b.StopTimer()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(w)
+
+ g := new(sync.WaitGroup)
+ g.Add(1)
+
+ s := NewSummary(SummaryOpts{})
+
+ for i := 0; i < 1000000; i++ {
+ s.Observe(float64(i))
+ }
+
+ for j := 0; j < w; j++ {
+ outs := make([]dto.Metric, b.N)
+
+ go func(o []dto.Metric) {
+ g.Wait()
+
+ for i := 0; i < b.N; i++ {
+ s.Write(&o[i])
+ }
+
+ wg.Done()
+ }(outs)
+ }
+
+ b.StartTimer()
+ g.Done()
+ wg.Wait()
+}
+
+func BenchmarkSummaryWrite1(b *testing.B) {
+ benchmarkSummaryWrite(1, b)
+}
+
+func BenchmarkSummaryWrite2(b *testing.B) {
+ benchmarkSummaryWrite(2, b)
+}
+
+func BenchmarkSummaryWrite4(b *testing.B) {
+ benchmarkSummaryWrite(4, b)
+}
+
+func BenchmarkSummaryWrite8(b *testing.B) {
+ benchmarkSummaryWrite(8, b)
+}
+
+func TestSummaryConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%5 + 1)
+ total := mutations * concLevel
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sum := NewSummary(SummaryOpts{
+ Name: "test_summary",
+ Help: "helpless",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ })
+
+ allVars := make([]float64, total)
+ var sampleSum float64
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ allVars[i*mutations+j] = v
+ sampleSum += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for _, v := range vals {
+ sum.Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ sort.Float64s(allVars)
+ start.Done()
+ end.Wait()
+
+ m := &dto.Metric{}
+ sum.Write(m)
+ if got, want := int(*m.Summary.SampleCount), total; got != want {
+ t.Errorf("got sample count %d, want %d", got, want)
+ }
+ if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f, want %f", got, want)
+ }
+
+ objectives := make([]float64, 0, len(DefObjectives))
+ for qu := range DefObjectives {
+ objectives = append(objectives, qu)
+ }
+ sort.Float64s(objectives)
+
+ for i, wantQ := range objectives {
+ ε := DefObjectives[wantQ]
+ gotQ := *m.Summary.Quantile[i].Quantile
+ gotV := *m.Summary.Quantile[i].Value
+ min, max := getBounds(allVars, wantQ, ε)
+ if gotQ != wantQ {
+ t.Errorf("got quantile %f, want %f", gotQ, wantQ)
+ }
+ if gotV < min || gotV > max {
+ t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSummaryVecConcurrency(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ }
+
+ rand.Seed(42)
+
+ objectives := make([]float64, 0, len(DefObjectives))
+ for qu := range DefObjectives {
+ objectives = append(objectives, qu)
+ }
+ sort.Float64s(objectives)
+
+ it := func(n uint32) bool {
+ mutations := int(n%1e4 + 1e4)
+ concLevel := int(n%7 + 1)
+ vecLength := int(n%3 + 1)
+
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ sum := NewSummaryVec(
+ SummaryOpts{
+ Name: "test_summary",
+ Help: "helpless",
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+ },
+ []string{"label"},
+ )
+
+ allVars := make([][]float64, vecLength)
+ sampleSums := make([]float64, vecLength)
+ for i := 0; i < concLevel; i++ {
+ vals := make([]float64, mutations)
+ picks := make([]int, mutations)
+ for j := 0; j < mutations; j++ {
+ v := rand.NormFloat64()
+ vals[j] = v
+ pick := rand.Intn(vecLength)
+ picks[j] = pick
+ allVars[pick] = append(allVars[pick], v)
+ sampleSums[pick] += v
+ }
+
+ go func(vals []float64) {
+ start.Wait()
+ for i, v := range vals {
+ sum.WithLabelValues(string('A' + picks[i])).Observe(v)
+ }
+ end.Done()
+ }(vals)
+ }
+ for _, vars := range allVars {
+ sort.Float64s(vars)
+ }
+ start.Done()
+ end.Wait()
+
+ for i := 0; i < vecLength; i++ {
+ m := &dto.Metric{}
+ s := sum.WithLabelValues(string('A' + i))
+ s.Write(m)
+ if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
+ t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
+ }
+ if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
+ t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
+ }
+ for j, wantQ := range objectives {
+ ε := DefObjectives[wantQ]
+ gotQ := *m.Summary.Quantile[j].Quantile
+ gotV := *m.Summary.Quantile[j].Value
+ min, max := getBounds(allVars[i], wantQ, ε)
+ if gotQ != wantQ {
+ t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
+ }
+ if gotV < min || gotV > max {
+ t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
+ }
+ }
+ }
+ return true
+ }
+
+ if err := quick.Check(it, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSummaryDecay(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping test in short mode.")
+ // More because it depends on timing than because it is particularly long...
+ }
+
+ sum := NewSummary(SummaryOpts{
+ Name: "test_summary",
+ Help: "helpless",
+ MaxAge: 100 * time.Millisecond,
+ Objectives: map[float64]float64{0.1: 0.001},
+ AgeBuckets: 10,
+ })
+
+ m := &dto.Metric{}
+ i := 0
+ tick := time.NewTicker(time.Millisecond)
+ for range tick.C {
+ i++
+ sum.Observe(float64(i))
+ if i%10 == 0 {
+ sum.Write(m)
+ if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
+ t.Errorf("%d. got %f, want %f", i, got, want)
+ }
+ m.Reset()
+ }
+ if i >= 1000 {
+ break
+ }
+ }
+ tick.Stop()
+ // Wait for MaxAge without observations and make sure quantiles are NaN.
+ time.Sleep(100 * time.Millisecond)
+ sum.Write(m)
+ if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {
+ t.Errorf("got %f, want NaN after expiration", got)
+ }
+}
+
+func getBounds(vars []float64, q, ε float64) (min, max float64) {
+ // TODO(beorn7): This currently tolerates an error of up to 2*ε. The
+ // error must be at most ε, but for some reason, it's sometimes slightly
+ // higher. That's a bug.
+ n := float64(len(vars))
+ lower := int((q - 2*ε) * n)
+ upper := int(math.Ceil((q + 2*ε) * n))
+ min = vars[0]
+ if lower > 1 {
+ min = vars[lower-1]
+ }
+ max = vars[len(vars)-1]
+ if upper < len(vars) {
+ max = vars[upper-1]
+ }
+ return
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/src/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000..f4cac5a0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
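+
+// A minimal usage sketch (not part of the upstream file; the gauge name is a
+// placeholder): the Gauge use case mentioned above, wiring a Timer to a Gauge
+// via ObserverFunc.
+//
+// g := NewGauge(GaugeOpts{Name: "myapp_last_run_duration_seconds"})
+// timer := NewTimer(ObserverFunc(g.Set))
+// // ... do the work to be timed ...
+// timer.ObserveDuration()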
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/timer_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/timer_test.go
new file mode 100644
index 00000000..927b7118
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/timer_test.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestTimerObserve(t *testing.T) {
+ var (
+ his = NewHistogram(HistogramOpts{Name: "test_histogram"})
+ sum = NewSummary(SummaryOpts{Name: "test_summary"})
+ gauge = NewGauge(GaugeOpts{Name: "test_gauge"})
+ )
+
+ func() {
+ hisTimer := NewTimer(his)
+ sumTimer := NewTimer(sum)
+ gaugeTimer := NewTimer(ObserverFunc(gauge.Set))
+ defer hisTimer.ObserveDuration()
+ defer sumTimer.ObserveDuration()
+ defer gaugeTimer.ObserveDuration()
+ }()
+
+ m := &dto.Metric{}
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+ m.Reset()
+ sum.Write(m)
+ if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for summary, got %d", want, got)
+ }
+ m.Reset()
+ gauge.Write(m)
+ if got := m.GetGauge().GetValue(); got <= 0 {
+ t.Errorf("want value > 0 for gauge, got %f", got)
+ }
+}
+
+func TestTimerEmpty(t *testing.T) {
+ emptyTimer := NewTimer(nil)
+ emptyTimer.ObserveDuration()
+ // Do nothing, just demonstrate it works without panic.
+}
+
+func TestTimerConditionalTiming(t *testing.T) {
+ var (
+ his = NewHistogram(HistogramOpts{
+ Name: "test_histogram",
+ })
+ timeMe = true
+ m = &dto.Metric{}
+ )
+
+ timedFunc := func() {
+ timer := NewTimer(ObserverFunc(func(v float64) {
+ if timeMe {
+ his.Observe(v)
+ }
+ }))
+ defer timer.ObserveDuration()
+ }
+
+ timedFunc() // This will time.
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+
+ timeMe = false
+ timedFunc() // This will not time again.
+ m.Reset()
+ his.Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for histogram, got %d", want, got)
+ }
+}
+
+func TestTimerByOutcome(t *testing.T) {
+ var (
+ his = NewHistogramVec(
+ HistogramOpts{Name: "test_histogram"},
+ []string{"outcome"},
+ )
+ outcome = "foo"
+ m = &dto.Metric{}
+ )
+
+ timedFunc := func() {
+ timer := NewTimer(ObserverFunc(func(v float64) {
+ his.WithLabelValues(outcome).Observe(v)
+ }))
+ defer timer.ObserveDuration()
+
+ if outcome == "foo" {
+ outcome = "bar"
+ return
+ }
+ outcome = "foo"
+ }
+
+ timedFunc()
+ his.WithLabelValues("foo").Write(m)
+ if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+ timedFunc()
+ m.Reset()
+ his.WithLabelValues("foo").Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+ timedFunc()
+ m.Reset()
+ his.WithLabelValues("foo").Write(m)
+ if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'foo' histogram, got %d", want, got)
+ }
+ m.Reset()
+ his.WithLabelValues("bar").Write(m)
+ if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {
+ t.Errorf("want %d observations for 'bar' histogram, got %d", want, got)
+ }
+
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000..065501d3
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,143 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that
+// no type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+//
+// Deprecated: The Untyped type is deprecated because it doesn't make sense in
+// direct instrumentation. If you need to mirror an external metric of unknown
+// type (usually while writing exporters), use MustNewConstMetric to create an
+// untyped metric instance on the fly.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
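+//
+// For illustration (the metric and label names are made up):
+// vec := NewUntypedVec(UntypedOpts{Name: "test_total"}, []string{"code"})
+// vec.WithLabelValues("404").Inc()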
+type UntypedVec struct {
+ *MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
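+//
+// A sketch (readValue stands in for the caller's own concurrency-safe
+// func() float64):
+// NewUntypedFunc(UntypedOpts{Name: "some_value"}, readValue)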
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/value.go b/vendor/src/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 00000000..7d3e8109
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,239 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) SetToCurrentTime() {
+ v.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
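+ // Lock-free add: load the current bits, compute the new value, and retry
+ // with compare-and-swap until no concurrent update interferes.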
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
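+//
+// A sketch of use inside a custom Collector's Collect method (desc is the
+// collector's own and has one variable label here):
+// ch <- MustNewConstMetric(desc, CounterValue, 42, "some_label_value")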
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
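+// populateMetric sets the label pairs on the given dto.Metric and stores the
+// value in the field matching the ValueType. It returns an error for an
+// unknown ValueType.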
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 00000000..7f3eef9a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,404 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
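+//
+// A concrete vector type typically embeds MetricVec and supplies a factory
+// function for its metric type; NewUntypedVec in this package shows the
+// pattern.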
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64][]metricWithLabelValues
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replaceable hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// Describe implements Collector. The length of the returned slice
+// is always one.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count
+// can
+// never match an actual Metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.deleteByHashWithLabels(h, labels)
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+ i := m.findMetricWithLabels(metrics, labels)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
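+// hashLabelValues hashes the given label values in order, returning
+// errInconsistentCardinality if their number does not match the Desc's
+// variable labels.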
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
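+// hashLabels computes the same hash as hashLabelValues, taking the values
+// from the labels map in the order of the Desc's variable labels. A missing
+// label name yields an error.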
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
+// values, or creates it and returns the new one.
+//
+// This method handles its own locking: a read lock for the lookup fast path,
+// and the write lock if the metric has to be created.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
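+ // Not found under the read lock. Take the write lock and look again, since
+ // another goroutine may have created the metric in the meantime.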
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ if !ok {
+ // Copy to avoid allocation in case we don't go down this code path.
+ copiedLVs := make([]string, len(lvs))
+ copy(copiedLVs, lvs)
+ metric = m.newMetric(copiedLVs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels, or
+// creates it and returns the new one.
+//
+// This method handles its own locking: a read lock for the lookup fast path,
+// and the write lock if the metric has to be created.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabels(hash, labels)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabels(hash, labels)
+ if !ok {
+ lvs := m.extractLabelValues(labels)
+ metric = m.newMetric(lvs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
diff --git a/vendor/src/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/src/github.com/prometheus/client_golang/prometheus/vec_test.go
new file mode 100644
index 00000000..e3c5aeba
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_golang/prometheus/vec_test.go
@@ -0,0 +1,312 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "testing"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func TestDelete(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testDelete(t, vec)
+}
+
+func TestDeleteWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testDelete(t, vec)
+}
+
+func testDelete(t *testing.T, vec *UntypedVec) {
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+}
+
+func TestDeleteLabelValues(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testDeleteLabelValues(t, vec)
+}
+
+func TestDeleteLabelValuesWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testDeleteLabelValues(t, vec)
+}
+
+func testDeleteLabelValues(t *testing.T, vec *UntypedVec) {
+ if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision.
+ if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+
+ vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42)
+ // Delete out of order.
+ if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+ if got, want := vec.DeleteLabelValues("v1"), false; got != want {
+ t.Errorf("got %v, want %v", got, want)
+ }
+}
+
+func TestMetricVec(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ testMetricVec(t, vec)
+}
+
+func TestMetricVecWithCollisions(t *testing.T) {
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"l1", "l2"},
+ )
+ vec.hashAdd = func(h uint64, s string) uint64 { return 1 }
+ vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }
+ testMetricVec(t, vec)
+}
+
+func testMetricVec(t *testing.T, vec *UntypedVec) {
+ vec.Reset() // Actually test Reset now!
+
+ var pair [2]string
+ // Keep track of metrics.
+ expected := map[[2]string]int{}
+
+ for i := 0; i < 1000; i++ {
+ pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Vary the label-value combinations.
+ expected[pair]++
+ vec.WithLabelValues(pair[0], pair[1]).Inc()
+
+ expected[[2]string{"v1", "v2"}]++
+ vec.WithLabelValues("v1", "v2").(Untyped).Inc()
+ }
+
+ var total int
+ for _, metrics := range vec.children {
+ for _, metric := range metrics {
+ total++
+ copy(pair[:], metric.values)
+
+ var metricOut dto.Metric
+ if err := metric.metric.Write(&metricOut); err != nil {
+ t.Fatal(err)
+ }
+ actual := *metricOut.Untyped.Value
+
+ var actualPair [2]string
+ for i, label := range metricOut.Label {
+ actualPair[i] = *label.Value
+ }
+
+ // Test output pair against metric.values to ensure we've selected
+ // the right one. We check this to ensure the below check means
+ // anything at all.
+ if actualPair != pair {
+ t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair)
+ }
+
+ if actual != float64(expected[pair]) {
+ t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair])
+ }
+ }
+ }
+
+ if total != len(expected) {
+ t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected))
+ }
+
+ vec.Reset()
+
+ if len(vec.children) > 0 {
+ t.Fatalf("reset failed")
+ }
+}
+
+func TestCounterVecEndToEndWithCollision(t *testing.T) {
+ vec := NewCounterVec(
+ CounterOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ []string{"labelname"},
+ )
+ vec.WithLabelValues("77kepQFQ8Kl").Inc()
+ vec.WithLabelValues("!0IC=VloaY").Add(2)
+
+ m := &dto.Metric{}
+ if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil {
+ t.Fatal(err)
+ }
+ if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want {
+ t.Errorf("got label value %q, want %q", got, want)
+ }
+ if got, want := m.GetCounter().GetValue(), 1.; got != want {
+ t.Errorf("got value %f, want %f", got, want)
+ }
+ m.Reset()
+ if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil {
+ t.Fatal(err)
+ }
+ if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want {
+ t.Errorf("got label value %q, want %q", got, want)
+ }
+ if got, want := m.GetCounter().GetValue(), 2.; got != want {
+ t.Errorf("got value %f, want %f", got, want)
+ }
+}
+
+func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {
+ benchmarkMetricVecWithLabelValues(b, map[string][]string{
+ "l1": {"onevalue"},
+ "l2": {"twovalue"},
+ })
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)
+}
+
+func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)
+}
+
+func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {
+ benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)
+}
+
+func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {
+ labels := map[string][]string{}
+
+ for i := 0; i < nkeys; i++ {
+ var (
+ k = fmt.Sprintf("key-%v", i)
+ vs = make([]string, 0, nvalues)
+ )
+ for j := 0; j < nvalues; j++ {
+ vs = append(vs, fmt.Sprintf("value-%v", j))
+ }
+ labels[k] = vs
+ }
+
+ benchmarkMetricVecWithLabelValues(b, labels)
+}
+
+func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {
+ var keys []string
+ for k := range labels { // Map order dependent, who cares though.
+ keys = append(keys, k)
+ }
+
+ values := make([]string, len(labels)) // Value cache for permutations.
+ vec := NewUntypedVec(
+ UntypedOpts{
+ Name: "test",
+ Help: "helpless",
+ },
+ keys,
+ )
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ // Vary the input across the provided map entries, cycling through each key's values.
+ for j, k := range keys {
+ candidates := labels[k]
+ values[j] = candidates[i%len(candidates)]
+ }
+
+ vec.WithLabelValues(values...)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 00000000..b065f868
--- /dev/null
+++ b/vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/bench_test.go b/vendor/src/github.com/prometheus/common/expfmt/bench_test.go
new file mode 100644
index 00000000..e539bfc1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/bench_test.go
@@ -0,0 +1,167 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var parser TextParser
+
+// Benchmarks to show how much penalty text format parsing actually inflicts.
+//
+// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
+//
+// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
+// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
+// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
+// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
+// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
+//
+// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
+// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
+// the difference becomes less relevant, only ~4x.
+//
+// The test data contains 248 samples.
+
+// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
+// family DTOs.
+func BenchmarkParseText(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
+// into metric family DTOs.
+func BenchmarkParseTextGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/text.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ if _, err := parser.TextToMetricFamilies(in); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
+// metric family DTOs. Note that this does not build a map of metric families
+// (as the text version does), because it is not required for Prometheus
+// ingestion either. (However, it is required for the text-format parsing, as
+// the metric family might be sprinkled all over the text, while the
+// protobuf-format guarantees bundling at one place.)
+func BenchmarkParseProto(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
+// protobuf format.
+func BenchmarkParseProtoGzip(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf.gz")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ family := &dto.MetricFamily{}
+ in, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ for {
+ family.Reset()
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
+// metric family DTOs into a map. This is not happening during Prometheus
+// ingestion. It is just here to measure the overhead of that map creation and
+// separate it from the overhead of the text format parsing.
+func BenchmarkParseProtoMap(b *testing.B) {
+ b.StopTimer()
+ data, err := ioutil.ReadFile("testdata/protobuf")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ families := map[string]*dto.MetricFamily{}
+ in := bytes.NewReader(data)
+ for {
+ family := &dto.MetricFamily{}
+ if _, err := pbutil.ReadDelimited(in, family); err != nil {
+ if err == io.EOF {
+ break
+ }
+ b.Fatal(err)
+ }
+ families[family.GetName()] = family
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/decode.go b/vendor/src/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000..a7a42d5e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
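+//
+// A minimal decoding loop could look like this (r and format are supplied by
+// the caller, format e.g. from ResponseFormat):
+//
+// dec := NewDecoder(r, format)
+// for {
+// mf := &dto.MetricFamily{}
+// if err := dec.Decode(mf); err != nil {
+// if err == io.EOF {
+// break
+// }
+// return err
+// }
+// // Use mf here.
+// }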
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
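+//
+// A usage sketch (dec is any Decoder; model.Now stamps samples that carry no
+// explicit timestamp):
+// sd := &SampleDecoder{Dec: dec, Opts: &DecodeOptions{Timestamp: model.Now()}}
+// var v model.Vector
+// err := sd.Decode(&v)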
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
+ }
+ return all, lastErr
+}
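+
+// For example (a hedged sketch; "fams" stands for any slice of decoded
+// *dto.MetricFamily values):
+//
+// vec, err := ExtractSamples(&DecodeOptions{Timestamp: model.Now()}, fams...)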
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f), nil
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f), nil
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f), nil
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f), nil
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f), nil
+ }
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
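+// extractSummary flattens one summary family into a sample per quantile
+// plus synthetic "<name>_sum" and "<name>_count" series, mirroring how
+// summaries appear in the text exposition format.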
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
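+// extractHistogram flattens one histogram family into a "<name>_bucket"
+// sample per bucket plus "<name>_sum" and "<name>_count" series; if the
+// input lacks a "+Inf" bucket, one is synthesized from the sample count.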
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/decode_test.go b/vendor/src/github.com/prometheus/common/expfmt/decode_test.go
new file mode 100644
index 00000000..82c1130c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/decode_test.go
@@ -0,0 +1,435 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "io"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
+)
+
+func TestTextDecoder(t *testing.T) {
+ var (
+ ts = model.Now()
+ in = `
+# Only a quite simple scenario with two metric families.
+# More complicated tests of the parser itself can be found in the text package.
+# TYPE mf2 counter
+mf2 3
+mf1{label="value1"} -3.14 123456
+mf1{label="value2"} 42
+mf2 4
+`
+ out = model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value1",
+ },
+ Value: -3.14,
+ Timestamp: 123456,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf1",
+ "label": "value2",
+ },
+ Value: 42,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 3,
+ Timestamp: ts,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "mf2",
+ },
+ Value: 4,
+ Timestamp: ts,
+ },
+ }
+ )
+
+ dec := &SampleDecoder{
+ Dec: &textDecoder{r: strings.NewReader(in)},
+ Opts: &DecodeOptions{
+ Timestamp: ts,
+ },
+ }
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(out)
+ if !reflect.DeepEqual(all, out) {
+ t.Fatalf("output does not match")
+ }
+}
+
+func TestProtoDecoder(t *testing.T) {
+
+ var testTime = model.Now()
+
+ scenarios := []struct {
+ in string
+ expected model.Vector
+ fail bool
+ }{
+ {
+ in: "",
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ fail: true,
+ },
+ {
+ in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 84,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "some_label_name": "some_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.99",
+ },
+ Value: -42,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "some_label_name": "some_label_value",
+ "quantile": "0.999",
+ },
+ Value: -84,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_count",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count_sum",
+ "another_label_name": "another_label_value",
+ },
+ Value: 0,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ "another_label_name": "another_label_value",
+ "quantile": "0.5",
+ },
+ Value: 10,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "100",
+ },
+ Value: 123,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "120",
+ },
+ Value: 412,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "144",
+ },
+ Value: 592,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "172.8",
+ },
+ Value: 1524,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_bucket",
+ "le": "+Inf",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_sum",
+ },
+ Value: 1756047.3,
+ Timestamp: testTime,
+ },
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_duration_microseconds_count",
+ },
+ Value: 2693,
+ Timestamp: testTime,
+ },
+ },
+ },
+ {
+ // The metric type is unset in this protobuf, which needs to be handled
+ // correctly by the decoder.
+ in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?",
+ expected: model.Vector{
+ &model.Sample{
+ Metric: model.Metric{
+ model.MetricNameLabel: "request_count",
+ },
+ Value: 1,
+ Timestamp: testTime,
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ dec := &SampleDecoder{
+ Dec: &protoDecoder{r: strings.NewReader(scenario.in)},
+ Opts: &DecodeOptions{
+ Timestamp: testTime,
+ },
+ }
+
+ var all model.Vector
+ for {
+ var smpls model.Vector
+ err := dec.Decode(&smpls)
+ if err == io.EOF {
+ break
+ }
+ if scenario.fail {
+ if err == nil {
+ t.Fatal("Expected error but got none")
+ }
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ all = append(all, smpls...)
+ }
+ sort.Sort(all)
+ sort.Sort(scenario.expected)
+ if !reflect.DeepEqual(all, scenario.expected) {
+ t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all)
+ }
+ }
+}
+
+func testDiscriminatorHTTPHeader(t testing.TB) {
+ var scenarios = []struct {
+ input map[string]string
+ output Format
+ err error
+ }{
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
+ output: FmtProtoDelim,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
+ output: FmtUnknown,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.4`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain`},
+ output: FmtText,
+ },
+ {
+ input: map[string]string{"Content-Type": `text/plain; version=0.0.3`},
+ output: FmtUnknown,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var header http.Header
+
+ if len(scenario.input) > 0 {
+ header = http.Header{}
+ }
+
+ for key, value := range scenario.input {
+ header.Add(key, value)
+ }
+
+ actual := ResponseFormat(header)
+
+ if scenario.output != actual {
+ t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
+ }
+ }
+}
+
+func TestDiscriminatorHTTPHeader(t *testing.T) {
+ testDiscriminatorHTTPHeader(t)
+}
+
+func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testDiscriminatorHTTPHeader(b)
+ }
+}
+
+func TestExtractSamples(t *testing.T) {
+ var (
+ goodMetricFamily1 = &dto.MetricFamily{
+ Name: proto.String("foo"),
+ Help: proto.String("Help for foo."),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Counter: &dto.Counter{
+ Value: proto.Float64(4711),
+ },
+ },
+ },
+ }
+ goodMetricFamily2 = &dto.MetricFamily{
+ Name: proto.String("bar"),
+ Help: proto.String("Help for bar."),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(3.14),
+ },
+ },
+ },
+ }
+ badMetricFamily = &dto.MetricFamily{
+ Name: proto.String("bad"),
+ Help: proto.String("Help for bad."),
+ Type: dto.MetricType(42).Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(2.7),
+ },
+ },
+ },
+ }
+
+ opts = &DecodeOptions{
+ Timestamp: 42,
+ }
+ )
+
+ got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2)
+ if err != nil {
+ t.Error("Unexpected error from ExtractSamples:", err)
+ }
+ want := model.Vector{
+ &model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42},
+ &model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42},
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+ }
+
+ got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2)
+ if err == nil {
+ t.Error("Expected error from ExtractSamples")
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/encode.go b/vendor/src/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 00000000..11839ed6
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for the protocol buffer format.
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder for the given format, typically the
+// result of content type negotiation via Negotiate. It panics on an
+// unknown format.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+}
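+
+// A hedged server-side sketch ("families" is a hypothetical slice of
+// *dto.MetricFamily): negotiate the format from the request, advertise it
+// in the response header, and encode each family.
+//
+// func metricsHandler(w http.ResponseWriter, r *http.Request) {
+//   format := Negotiate(r.Header)
+//   w.Header().Set("Content-Type", string(format))
+//   enc := NewEncoder(w, format)
+//   for _, mf := range families {
+//     if err := enc.Encode(mf); err != nil {
+//       return // a sketch, not production error handling
+//     }
+//   }
+// }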
diff --git a/vendor/src/github.com/prometheus/common/expfmt/expfmt.go b/vendor/src/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000..371ac750
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire protocols.
+const (
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+)
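+
+// For illustration, FmtProtoDelim expands to the full Content-Type value
+// "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited".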
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz.go b/vendor/src/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000..dc2eedee
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz the text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
new file mode 100644
index 00000000..139597f9
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
@@ -0,0 +1,2 @@
+
+
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
new file mode 100644
index 00000000..2ae87067
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
@@ -0,0 +1,6 @@
+
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
new file mode 100644
index 00000000..5c351db3
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
@@ -0,0 +1,12 @@
+
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
new file mode 100644
index 00000000..0b3c345a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
@@ -0,0 +1,22 @@
+
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
new file mode 100644
index 00000000..bde0a387
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
@@ -0,0 +1,10 @@
+
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
new file mode 100644
index 00000000..4c67f9a1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
@@ -0,0 +1 @@
+bla 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
new file mode 100644
index 00000000..b853478e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
@@ -0,0 +1 @@
+metric{label="\t"} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
new file mode 100644
index 00000000..b5fe5f5a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2 3
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
new file mode 100644
index 00000000..57c7fbc0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
@@ -0,0 +1 @@
+metric{label="bla"} blubb
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
new file mode 100644
index 00000000..0a9df79a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
@@ -0,0 +1,3 @@
+
+# HELP metric one
+# HELP metric two
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
new file mode 100644
index 00000000..5bc74278
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
@@ -0,0 +1,3 @@
+
+# TYPE metric counter
+# TYPE metric untyped
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
new file mode 100644
index 00000000..a9a24265
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
@@ -0,0 +1,3 @@
+
+metric 4.12
+# TYPE metric counter
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
new file mode 100644
index 00000000..7e95ca8f
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
@@ -0,0 +1,2 @@
+
+# TYPE metric bla
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
new file mode 100644
index 00000000..7825f888
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
@@ -0,0 +1,2 @@
+
+# TYPE met-ric
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
new file mode 100644
index 00000000..8f35cae0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
@@ -0,0 +1 @@
+@invalidmetric{label="bla"} 3.14 2
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
new file mode 100644
index 00000000..7ca2cc26
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
@@ -0,0 +1 @@
+{label="bla"} 3.14 2
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
new file mode 100644
index 00000000..7a6ccc0d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
@@ -0,0 +1,3 @@
+
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
new file mode 100644
index 00000000..726d0017
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
@@ -0,0 +1,3 @@
+
+metric{label="new
+line"} 3.14
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
new file mode 100644
index 00000000..6aa9e308
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
@@ -0,0 +1 @@
+metric{@="bla"} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
new file mode 100644
index 00000000..d112cb90
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
@@ -0,0 +1 @@
+metric{__name__="bla"} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
new file mode 100644
index 00000000..b34554a8
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
@@ -0,0 +1 @@
+metric{label+="bla"} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
new file mode 100644
index 00000000..c4d7df3d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
@@ -0,0 +1 @@
+metric{label=bla} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
new file mode 100644
index 00000000..97eafc4a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
@@ -0,0 +1,3 @@
+
+# TYPE metric summary
+metric{quantile="bla"} 3.14
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
new file mode 100644
index 00000000..fc706496
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
@@ -0,0 +1 @@
+metric{label="bla"+} 3.14
\ No newline at end of file
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
new file mode 100644
index 00000000..57b4879c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
@@ -0,0 +1 @@
+metric{label="bla"} 3.14 2.72
diff --git a/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
new file mode 100644
index 00000000..be1e6a36
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
@@ -0,0 +1 @@
+m{} 0
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/json2 b/vendor/src/github.com/prometheus/common/expfmt/testdata/json2
new file mode 100644
index 00000000..b914c938
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/testdata/json2
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "service": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/json2_bad b/vendor/src/github.com/prometheus/common/expfmt/testdata/json2_bad
new file mode 100644
index 00000000..cc6ac97c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/testdata/json2_bad
@@ -0,0 +1,46 @@
+[
+ {
+ "baseLabels": {
+ "__name__": "rpc_calls_total",
+ "job": "batch_job"
+ },
+ "docstring": "RPC calls.",
+ "metric": {
+ "type": "counter",
+ "value": [
+ {
+ "labels": {
+ "servic|e": "zed"
+ },
+ "value": 25
+ },
+ {
+ "labels": {
+ "service": "bar"
+ },
+ "value": 24
+ }
+ ]
+ }
+ },
+ {
+ "baseLabels": {
+ "__name__": "rpc_latency_microseconds"
+ },
+ "docstring": "RPC latency.",
+ "metric": {
+ "type": "histogram",
+ "value": [
+ {
+ "labels": {
+ "service": "foo"
+ },
+ "value": {
+ "0.010000": 15,
+ "0.990000": 17
+ }
+ }
+ ]
+ }
+ }
+]
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf b/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf
new file mode 100644
index 00000000..b2d018a7
Binary files /dev/null and b/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf differ
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz b/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz
new file mode 100644
index 00000000..7622adb1
Binary files /dev/null and b/vendor/src/github.com/prometheus/common/expfmt/testdata/protobuf.gz differ
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/text b/vendor/src/github.com/prometheus/common/expfmt/testdata/text
new file mode 100644
index 00000000..f3d8c378
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/testdata/text
@@ -0,0 +1,322 @@
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/"} 0
+http_request_duration_microseconds_count{handler="/"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/alerts"} 0
+http_request_duration_microseconds_count{handler="/alerts"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/metrics"} 0
+http_request_duration_microseconds_count{handler="/api/metrics"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query"} 0
+http_request_duration_microseconds_count{handler="/api/query"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/query_range"} 0
+http_request_duration_microseconds_count{handler="/api/query_range"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/api/targets"} 0
+http_request_duration_microseconds_count{handler="/api/targets"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/consoles/"} 0
+http_request_duration_microseconds_count{handler="/consoles/"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/graph"} 0
+http_request_duration_microseconds_count{handler="/graph"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/heap"} 0
+http_request_duration_microseconds_count{handler="/heap"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
+http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
+http_request_duration_microseconds_sum{handler="/static/"} 0
+http_request_duration_microseconds_count{handler="/static/"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
+http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
+http_request_duration_microseconds_count{handler="prometheus"} 119
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="/",quantile="0.5"} 0
+http_request_size_bytes{handler="/",quantile="0.9"} 0
+http_request_size_bytes{handler="/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/"} 0
+http_request_size_bytes_count{handler="/"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/alerts"} 0
+http_request_size_bytes_count{handler="/alerts"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/metrics"} 0
+http_request_size_bytes_count{handler="/api/metrics"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query"} 0
+http_request_size_bytes_count{handler="/api/query"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/query_range"} 0
+http_request_size_bytes_count{handler="/api/query_range"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/api/targets"} 0
+http_request_size_bytes_count{handler="/api/targets"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/consoles/"} 0
+http_request_size_bytes_count{handler="/consoles/"} 0
+http_request_size_bytes{handler="/graph",quantile="0.5"} 0
+http_request_size_bytes{handler="/graph",quantile="0.9"} 0
+http_request_size_bytes{handler="/graph",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/graph"} 0
+http_request_size_bytes_count{handler="/graph"} 0
+http_request_size_bytes{handler="/heap",quantile="0.5"} 0
+http_request_size_bytes{handler="/heap",quantile="0.9"} 0
+http_request_size_bytes{handler="/heap",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/heap"} 0
+http_request_size_bytes_count{handler="/heap"} 0
+http_request_size_bytes{handler="/static/",quantile="0.5"} 0
+http_request_size_bytes{handler="/static/",quantile="0.9"} 0
+http_request_size_bytes{handler="/static/",quantile="0.99"} 0
+http_request_size_bytes_sum{handler="/static/"} 0
+http_request_size_bytes_count{handler="/static/"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
+http_request_size_bytes_sum{handler="prometheus"} 34488
+http_request_size_bytes_count{handler="prometheus"} 119
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="prometheus",method="get"} 119
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="/",quantile="0.5"} 0
+http_response_size_bytes{handler="/",quantile="0.9"} 0
+http_response_size_bytes{handler="/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/"} 0
+http_response_size_bytes_count{handler="/"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
+http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/alerts"} 0
+http_response_size_bytes_count{handler="/alerts"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/metrics"} 0
+http_response_size_bytes_count{handler="/api/metrics"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query"} 0
+http_response_size_bytes_count{handler="/api/query"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/query_range"} 0
+http_response_size_bytes_count{handler="/api/query_range"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
+http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/api/targets"} 0
+http_response_size_bytes_count{handler="/api/targets"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
+http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/consoles/"} 0
+http_response_size_bytes_count{handler="/consoles/"} 0
+http_response_size_bytes{handler="/graph",quantile="0.5"} 0
+http_response_size_bytes{handler="/graph",quantile="0.9"} 0
+http_response_size_bytes{handler="/graph",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/graph"} 0
+http_response_size_bytes_count{handler="/graph"} 0
+http_response_size_bytes{handler="/heap",quantile="0.5"} 0
+http_response_size_bytes{handler="/heap",quantile="0.9"} 0
+http_response_size_bytes{handler="/heap",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/heap"} 0
+http_response_size_bytes_count{handler="/heap"} 0
+http_response_size_bytes{handler="/static/",quantile="0.5"} 0
+http_response_size_bytes{handler="/static/",quantile="0.9"} 0
+http_response_size_bytes{handler="/static/",quantile="0.99"} 0
+http_response_size_bytes_sum{handler="/static/"} 0
+http_response_size_bytes_count{handler="/static/"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
+http_response_size_bytes_sum{handler="prometheus"} 247001
+http_response_size_bytes_count{handler="prometheus"} 119
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.55
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 70
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 8192
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 29
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 5.3870592e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.42236894836e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 5.41478912e+08
+# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
+# TYPE prometheus_dns_sd_lookup_failures_total counter
+prometheus_dns_sd_lookup_failures_total 0
+# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
+# TYPE prometheus_dns_sd_lookups_total counter
+prometheus_dns_sd_lookups_total 7
+# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
+# TYPE prometheus_evaluator_duration_milliseconds summary
+prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
+prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
+prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
+prometheus_evaluator_duration_milliseconds_sum 12
+prometheus_evaluator_duration_milliseconds_count 23
+# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
+# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
+prometheus_local_storage_checkpoint_duration_milliseconds 0
+# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
+# TYPE prometheus_local_storage_chunk_ops_total counter
+prometheus_local_storage_chunk_ops_total{type="create"} 598
+prometheus_local_storage_chunk_ops_total{type="persist"} 174
+prometheus_local_storage_chunk_ops_total{type="pin"} 920
+prometheus_local_storage_chunk_ops_total{type="transcode"} 415
+prometheus_local_storage_chunk_ops_total{type="unpin"} 920
+# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
+# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
+prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
+# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
+# TYPE prometheus_local_storage_indexing_batch_sizes summary
+prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
+prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
+prometheus_local_storage_indexing_batch_sizes_sum 2
+prometheus_local_storage_indexing_batch_sizes_count 1
+# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
+# TYPE prometheus_local_storage_indexing_queue_capacity gauge
+prometheus_local_storage_indexing_queue_capacity 16384
+# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
+# TYPE prometheus_local_storage_indexing_queue_length gauge
+prometheus_local_storage_indexing_queue_length 0
+# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
+# TYPE prometheus_local_storage_ingested_samples_total counter
+prometheus_local_storage_ingested_samples_total 30473
+# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
+# TYPE prometheus_local_storage_invalid_preload_requests_total counter
+prometheus_local_storage_invalid_preload_requests_total 0
+# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
+# TYPE prometheus_local_storage_memory_chunkdescs gauge
+prometheus_local_storage_memory_chunkdescs 1059
+# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
+# TYPE prometheus_local_storage_memory_chunks gauge
+prometheus_local_storage_memory_chunks 1020
+# HELP prometheus_local_storage_memory_series The current number of series in memory.
+# TYPE prometheus_local_storage_memory_series gauge
+prometheus_local_storage_memory_series 424
+# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
+# TYPE prometheus_local_storage_persist_latency_microseconds summary
+prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
+prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
+prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
+prometheus_local_storage_persist_latency_microseconds_sum 20424.415
+prometheus_local_storage_persist_latency_microseconds_count 174
+# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
+# TYPE prometheus_local_storage_persist_queue_capacity gauge
+prometheus_local_storage_persist_queue_capacity 1024
+# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
+# TYPE prometheus_local_storage_persist_queue_length gauge
+prometheus_local_storage_persist_queue_length 0
+# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
+# TYPE prometheus_local_storage_series_ops_total counter
+prometheus_local_storage_series_ops_total{type="create"} 2
+prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
+# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
+# TYPE prometheus_notifications_latency_milliseconds summary
+prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
+prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
+prometheus_notifications_latency_milliseconds_sum 0
+prometheus_notifications_latency_milliseconds_count 0
+# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
+# TYPE prometheus_notifications_queue_capacity gauge
+prometheus_notifications_queue_capacity 100
+# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
+# TYPE prometheus_notifications_queue_length gauge
+prometheus_notifications_queue_length 0
+# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
+# TYPE prometheus_rule_evaluation_duration_milliseconds summary
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
+prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
+prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
+prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
+# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
+# TYPE prometheus_rule_evaluation_failures_total counter
+prometheus_rule_evaluation_failures_total 0
+# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
+# TYPE prometheus_samples_queue_capacity gauge
+prometheus_samples_queue_capacity 4096
+# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
+# TYPE prometheus_samples_queue_length gauge
+prometheus_samples_queue_length 0
+# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
+prometheus_target_interval_length_seconds_sum{interval="15s"} 175
+prometheus_target_interval_length_seconds_count{interval="15s"} 12
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
+prometheus_target_interval_length_seconds_sum{interval="1s"} 55
+prometheus_target_interval_length_seconds_count{interval="1s"} 117
diff --git a/vendor/src/github.com/prometheus/common/expfmt/testdata/text.gz b/vendor/src/github.com/prometheus/common/expfmt/testdata/text.gz
new file mode 100644
index 00000000..b7658c84
Binary files /dev/null and b/vendor/src/github.com/prometheus/common/expfmt/testdata/text.gz differ
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_create.go b/vendor/src/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000..f11321cd
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,323 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output has the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
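+//
+// A minimal usage sketch (illustrative; 'mf' stands for a populated
+// *dto.MetricFamily):
+//
+//	var buf bytes.Buffer
+//	if _, err := MetricFamilyToText(&buf, mf); err != nil {
+//		// handle the error
+//	}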
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
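+			// The text format requires a +Inf bucket whose cumulative count
+			// equals the overall sample count; synthesize one if the input
+			// did not already contain it.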
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
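+//
+// For example, a counter sample with two labels, a value, and a timestamp is
+// rendered as (taken from a scenario in text_create_test.go):
+//
+//	name{labelname="val2",basename="basevalue"} 0.23 1234567890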
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
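+//
+// For example, two label pairs plus an additional 'quantile' pair render as
+//
+//	{name_1="value 1",name_2="value 2",quantile="0.5"}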
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_create_test.go b/vendor/src/github.com/prometheus/common/expfmt/text_create_test.go
new file mode 100644
index 00000000..e4cc5d80
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_create_test.go
@@ -0,0 +1,443 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+func testCreate(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ out string
+ }{
+ // 0: Counter, NaN as value, timestamp given.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ out: `# HELP name two-line\n doc str\\ing
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name{labelname="val2",basename="basevalue"} 0.23 1234567890
+`,
+ },
+ // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("gauge_name"),
+ Help: proto.String("gauge\ndoc\nstr\"ing"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("val with\nnew line"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("val with \\backslash and \"quotes\""),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("Björn"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("佖佥"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(3.14E42),
+ },
+ },
+ },
+ },
+ out: `# HELP gauge_name gauge\ndoc\nstr"ing
+# TYPE gauge_name gauge
+gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
+gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
+`,
+ },
+ // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("untyped_name"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1.23e-45),
+ },
+ },
+ },
+ },
+ out: `# TYPE untyped_name untyped
+untyped_name -Inf
+untyped_name{name_1="value 1"} -1.23e-45
+`,
+ },
+ // 3: Summary.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("summary_name"),
+ Help: proto.String("summary docstring"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(-3.4567),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(-1.23),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(.2342354),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(0),
+ },
+ },
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("name_1"),
+ Value: proto.String("value 1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("name_2"),
+ Value: proto.String("value 2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(4711),
+ SampleSum: proto.Float64(2010.1971),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(1),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(2),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.99),
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP summary_name summary docstring
+# TYPE summary_name summary
+summary_name{quantile="0.5"} -1.23
+summary_name{quantile="0.9"} 0.2342354
+summary_name{quantile="0.99"} 0
+summary_name_sum -3.4567
+summary_name_count 42
+summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
+summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
+summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
+summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
+summary_name_count{name_1="value 1",name_2="value 2"} 4711
+`,
+ },
+ // 4: Histogram
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 5: Histogram with missing +Inf bucket.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ },
+ },
+ },
+ },
+ },
+ out: `# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ },
+ // 6: No metric type, should result in default type Counter.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ out: `# HELP name doc string
+# TYPE name counter
+name -Inf
+`,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
+ n, err := MetricFamilyToText(out, scenario.in)
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), n; expected != got {
+ t.Errorf(
+ "%d. expected %d bytes written, got %d",
+ i, expected, got,
+ )
+ }
+ if expected, got := scenario.out, out.String(); expected != got {
+ t.Errorf(
+ "%d. expected out=%q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestCreate(t *testing.T) {
+ testCreate(t)
+}
+
+func BenchmarkCreate(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreate(b)
+ }
+}
+
+func testCreateError(t testing.TB) {
+ var scenarios = []struct {
+ in *dto.MetricFamily
+ err string
+ }{
+ // 0: No metric.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{},
+ },
+ err: "MetricFamily has no metrics",
+ },
+ // 1: No metric name.
+ {
+ in: &dto.MetricFamily{
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "MetricFamily has no name",
+ },
+ // 2: Wrong type.
+ {
+ in: &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("doc string"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ err: "expected counter in metric",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ var out bytes.Buffer
+ _, err := MetricFamilyToText(&out, scenario.in)
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+		if expected, got := scenario.err, err.Error(); !strings.HasPrefix(got, expected) {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+
+}
+
+func TestCreateError(t *testing.T) {
+ testCreateError(t)
+}
+
+func BenchmarkCreateError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testCreateError(b)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_parse.go b/vendor/src/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 00000000..ef9a1507
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,778 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine.
+// Executing it progresses the machine: the stateFn returns another stateFn,
+// which represents the new state. The end state is represented by nil.
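+//
+// For illustration, TextToMetricFamilies below drives such a machine with
+//
+//	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+//	}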
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+	// These fields tell us if the currently processed line ends in '_count'
+	// or '_sum' and belongs to a summary/histogram, in which case it
+	// represents the sample count or sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily nor the label
+// pairs within each Metric are sorted. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way that
+// MetricFamilyToText creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate TextParser for each goroutine.
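+//
+// A minimal usage sketch (illustrative; 'input' stands for any exposition
+// text):
+//
+//	var p TextParser
+//	families, err := p.TextToMetricFamilies(strings.NewReader(input))
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	for name, mf := range families {
+//		fmt.Println(name, mf.GetType())
+//	}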
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
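+	// Allocate fresh maps only if they are missing or were used during the
+	// previous run; an existing empty map can safely be reused.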
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+	// Set the type now if it hasn't been set yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. First check whether this is a summary
+	// or histogram whose metric already exists, which we can only know
+	// after having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// by reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+	// Check whether this is a _sum or _count for a summary/histogram.
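+	// For example, "my_summary_count" resolves to the candidate family
+	// name "my_summary" (cf. the scenarios in text_parse_test.go).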
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
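+// summaryMetricName strips a trailing "_count" or "_sum" so that, e.g.,
+// "my_summary_count" yields the family name "my_summary".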
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
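+// histogramMetricName strips a trailing "_count", "_sum", or "_bucket" so
+// that, e.g., "request_duration_microseconds_bucket" yields the family name
+// "request_duration_microseconds".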
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/src/github.com/prometheus/common/expfmt/text_parse_test.go
new file mode 100644
index 00000000..7e7388ce
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/expfmt/text_parse_test.go
@@ -0,0 +1,588 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func testTextParse(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ out []*dto.MetricFamily
+ }{
+ // 0: Empty lines as input.
+ {
+ in: `
+
+`,
+ out: []*dto.MetricFamily{},
+ },
+ // 1: Minimal case.
+ {
+ in: `
+minimal_metric 1.234
+another_metric -3e3 103948
+# Even that:
+no_labels{} 3
+# HELP line for non-existing metric will be ignored.
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("minimal_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(1.234),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_metric"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-3e3),
+ },
+ TimestampMs: proto.Int64(103948),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("no_labels"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(3),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 2: Counters & gauges, docstrings, various whitespace, escape sequences.
+ {
+ in: `
+# A normal comment.
+#
+# TYPE name counter
+name{labelname="val1",basename="basevalue"} NaN
+name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
+# HELP name two-line\n doc str\\ing
+
+ # HELP name2 doc str"ing 2
+ # TYPE name2 gauge
+name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
+name2{ labelname = "val1" , }-Inf
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("name"),
+ Help: proto.String("two-line\n doc str\\ing"),
+ Type: dto.MetricType_COUNTER.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("base\"v\\al\nue"),
+ },
+ },
+ Counter: &dto.Counter{
+ Value: proto.Float64(.23),
+ },
+ TimestampMs: proto.Int64(1234567890),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("name2"),
+ Help: proto.String("doc str\"ing 2"),
+ Type: dto.MetricType_GAUGE.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("basename"),
+ Value: proto.String("basevalue2"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(+1)),
+ },
+ TimestampMs: proto.Int64(54321),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("labelname"),
+ Value: proto.String("val1"),
+ },
+ },
+ Gauge: &dto.Gauge{
+ Value: proto.Float64(math.Inf(-1)),
+ },
+ },
+ },
+ },
+ },
+ },
+ // 3: The evil summary, mixed with other types and funny comments.
+ {
+ in: `
+# TYPE my_summary summary
+my_summary{n1="val1",quantile="0.5"} 110
+decoy -1 -2
+my_summary{n1="val1",quantile="0.9"} 140 1
+my_summary_count{n1="val1"} 42
+# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2
+fake_sum{n1="val1"} 2001
+# TYPE another_summary summary
+another_summary_count{n2="val2",n1="val1"} 20
+my_summary_count{n2="val2",n1="val1"} 5 5
+another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
+my_summary_sum{n1="val2"} 08 15
+my_summary{n1="val3", quantile="0.2"} 4711
+ my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
+# some
+# funny comments
+# HELP
+# HELP
+# HELP my_summary
+# HELP my_summary
+`,
+ out: []*dto.MetricFamily{
+ &dto.MetricFamily{
+ Name: proto.String("fake_sum"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(2001),
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("decoy"),
+ Type: dto.MetricType_UNTYPED.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Untyped: &dto.Untyped{
+ Value: proto.Float64(-1),
+ },
+ TimestampMs: proto.Int64(-2),
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("my_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(42),
+ SampleSum: proto.Float64(4711),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.5),
+ Value: proto.Float64(110),
+ },
+ &dto.Quantile{
+ Quantile: proto.Float64(0.9),
+ Value: proto.Float64(140),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(2),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(5),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(-12.34),
+ Value: proto.Float64(math.NaN()),
+ },
+ },
+ },
+ TimestampMs: proto.Int64(5),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val2"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleSum: proto.Float64(8),
+ },
+ TimestampMs: proto.Int64(15),
+ },
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val3"),
+ },
+ },
+ Summary: &dto.Summary{
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.2),
+ Value: proto.Float64(4711),
+ },
+ },
+ },
+ },
+ },
+ },
+ &dto.MetricFamily{
+ Name: proto.String("another_summary"),
+ Type: dto.MetricType_SUMMARY.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Label: []*dto.LabelPair{
+ &dto.LabelPair{
+ Name: proto.String("n2"),
+ Value: proto.String("val2"),
+ },
+ &dto.LabelPair{
+ Name: proto.String("n1"),
+ Value: proto.String("val1"),
+ },
+ },
+ Summary: &dto.Summary{
+ SampleCount: proto.Uint64(20),
+ Quantile: []*dto.Quantile{
+ &dto.Quantile{
+ Quantile: proto.Float64(0.3),
+ Value: proto.Float64(-1.2),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ // 4: The histogram.
+ {
+ in: `
+# HELP request_duration_microseconds The response latency.
+# TYPE request_duration_microseconds histogram
+request_duration_microseconds_bucket{le="100"} 123
+request_duration_microseconds_bucket{le="120"} 412
+request_duration_microseconds_bucket{le="144"} 592
+request_duration_microseconds_bucket{le="172.8"} 1524
+request_duration_microseconds_bucket{le="+Inf"} 2693
+request_duration_microseconds_sum 1.7560473e+06
+request_duration_microseconds_count 2693
+`,
+ out: []*dto.MetricFamily{
+ {
+ Name: proto.String("request_duration_microseconds"),
+ Help: proto.String("The response latency."),
+ Type: dto.MetricType_HISTOGRAM.Enum(),
+ Metric: []*dto.Metric{
+ &dto.Metric{
+ Histogram: &dto.Histogram{
+ SampleCount: proto.Uint64(2693),
+ SampleSum: proto.Float64(1756047.3),
+ Bucket: []*dto.Bucket{
+ &dto.Bucket{
+ UpperBound: proto.Float64(100),
+ CumulativeCount: proto.Uint64(123),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(120),
+ CumulativeCount: proto.Uint64(412),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(144),
+ CumulativeCount: proto.Uint64(592),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(172.8),
+ CumulativeCount: proto.Uint64(1524),
+ },
+ &dto.Bucket{
+ UpperBound: proto.Float64(math.Inf(+1)),
+ CumulativeCount: proto.Uint64(2693),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, scenario := range scenarios {
+ out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err != nil {
+ t.Errorf("%d. error: %s", i, err)
+ continue
+ }
+ if expected, got := len(scenario.out), len(out); expected != got {
+ t.Errorf(
+ "%d. expected %d MetricFamilies, got %d",
+ i, expected, got,
+ )
+ }
+ for _, expected := range scenario.out {
+ got, ok := out[expected.GetName()]
+ if !ok {
+ t.Errorf(
+ "%d. expected MetricFamily %q, found none",
+ i, expected.GetName(),
+ )
+ continue
+ }
+ if expected.String() != got.String() {
+ t.Errorf(
+ "%d. expected MetricFamily %s, got %s",
+ i, expected, got,
+ )
+ }
+ }
+ }
+}
+
+func TestTextParse(t *testing.T) {
+ testTextParse(t)
+}
+
+func BenchmarkTextParse(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParse(b)
+ }
+}
+
+func testTextParseError(t testing.TB) {
+ var scenarios = []struct {
+ in string
+ err string
+ }{
+ // 0: No new-line at end of input.
+ {
+ in: `
+bla 3.14
+blubber 42`,
+ err: "text format parsing error in line 3: unexpected end of input stream",
+ },
+ // 1: Invalid escape sequence in label value.
+ {
+ in: `metric{label="\t"} 3.14`,
+ err: "text format parsing error in line 1: invalid escape sequence",
+ },
+ // 2: Newline in label value.
+ {
+ in: `
+metric{label="new
+line"} 3.14
+`,
+ err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
+ },
+ // 3: Invalid label name.
+ {
+ in: `metric{@="bla"} 3.14`,
+ err: "text format parsing error in line 1: invalid label name for metric",
+ },
+ // 4: Reserved label name.
+ {
+ in: `metric{__name__="bla"} 3.14`,
+ err: `text format parsing error in line 1: label name "__name__" is reserved`,
+ },
+ // 5: Missing '=' after label name.
+ {
+ in: `metric{label+="bla"} 3.14`,
+ err: "text format parsing error in line 1: expected '=' after label name",
+ },
+ // 6: Missing opening '"' for label value.
+ {
+ in: `metric{label=bla} 3.14`,
+ err: "text format parsing error in line 1: expected '\"' at start of label value",
+ },
+ // 7: Non-float value for the 'quantile' label.
+ {
+ in: `
+# TYPE metric summary
+metric{quantile="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'quantile' label",
+ },
+ // 8: Junk after closing '"' of label value.
+ {
+ in: `metric{label="bla"+} 3.14`,
+ err: "text format parsing error in line 1: unexpected end of label value",
+ },
+ // 9: Non-integer timestamp.
+ {
+ in: `metric{label="bla"} 3.14 2.72
+`,
+ err: "text format parsing error in line 1: expected integer as timestamp",
+ },
+ // 10: Spurious token after timestamp.
+ {
+ in: `metric{label="bla"} 3.14 2 3
+`,
+ err: "text format parsing error in line 1: spurious string after timestamp",
+ },
+ // 11: Non-float sample value.
+ {
+ in: `metric{label="bla"} blubb
+`,
+ err: "text format parsing error in line 1: expected float as value",
+ },
+ // 12: Second HELP line for the same metric name.
+ {
+ in: `
+# HELP metric one
+# HELP metric two
+`,
+ err: "text format parsing error in line 3: second HELP line for metric name",
+ },
+ // 13: Second TYPE line for the same metric name.
+ {
+ in: `
+# TYPE metric counter
+# TYPE metric untyped
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+ // 14: TYPE line after samples were reported.
+ {
+ in: `
+metric 4.12
+# TYPE metric counter
+`,
+ err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
+ },
+ // 15: Unknown metric type.
+ {
+ in: `
+# TYPE metric bla
+`,
+ err: "text format parsing error in line 2: unknown metric type",
+ },
+ // 16: Invalid metric name in TYPE comment.
+ {
+ in: `
+# TYPE met-ric
+`,
+ err: "text format parsing error in line 2: invalid metric name in comment",
+ },
+ // 17: Invalid metric name.
+ {
+ in: `@invalidmetric{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+ // 18: Missing metric name.
+ {
+ in: `{label="bla"} 3.14 2`,
+ err: "text format parsing error in line 1: invalid metric name",
+ },
+ // 19: Non-float value for the 'le' label of a histogram.
+ {
+ in: `
+# TYPE metric histogram
+metric_bucket{le="bla"} 3.14
+`,
+ err: "text format parsing error in line 3: expected float as value for 'le' label",
+ },
+ }
+
+ for i, scenario := range scenarios {
+ _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
+ if err == nil {
+ t.Errorf("%d. expected error, got nil", i)
+ continue
+ }
+ if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
+ t.Errorf(
+ "%d. expected error starting with %q, got %q",
+ i, expected, got,
+ )
+ }
+ }
+}
+
+func TestTextParseError(t *testing.T) {
+ testTextParseError(t)
+}
+
+func BenchmarkParseError(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testTextParseError(b)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 00000000..7723656d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float32
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 00000000..648b38cb
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
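+
+// negotiateSketch is a usage sketch (editor's addition, not upstream code).
+// The q-weighted text/html clause outranks the catch-all clause, and
+// text/html is among the alternatives, so it wins over application/json.
+func negotiateSketch() string {
+ header := "text/html;q=0.9, */*;q=0.1"
+ return Negotiate(header, []string{"application/json", "text/html"})
+ // returns "text/html"
+}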
diff --git a/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
new file mode 100644
index 00000000..41d328f1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go
@@ -0,0 +1,33 @@
+package goautoneg
+
+import (
+ "testing"
+)
+
+var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"
+
+func TestParseAccept(t *testing.T) {
+ alternatives := []string{"text/html", "image/png"}
+ content_type := Negotiate(chrome, alternatives)
+ if content_type != "image/png" {
+ t.Errorf("got %s expected image/png", content_type)
+ }
+
+ alternatives = []string{"text/html", "text/plain", "text/n3"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/html" {
+ t.Errorf("got %s expected text/html", content_type)
+ }
+
+ alternatives = []string{"text/n3", "text/plain"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/plain" {
+ t.Errorf("got %s expected text/plain", content_type)
+ }
+
+ alternatives = []string{"text/n3", "application/rdf+xml"}
+ content_type = Negotiate(chrome, alternatives)
+ if content_type != "text/n3" {
+ t.Errorf("got %s expected text/n3", content_type)
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/alert.go b/vendor/src/github.com/prometheus/common/model/alert.go
new file mode 100644
index 00000000..35e739c7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus ecosystem.
+type Alert struct {
+ // Label value pairs for purposes of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate returns an error if the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
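+
+// exampleAlertStatus is a lifecycle sketch (editor's addition, not upstream
+// code): an alert with a zero EndsAt is firing; once EndsAt lies in the
+// past, Resolved and Status report it as resolved.
+func exampleAlertStatus() AlertStatus {
+ a := &Alert{
+ Labels:   LabelSet{AlertNameLabel: "HighErrorRate"},
+ StartsAt: time.Now().Add(-time.Hour),
+ }
+ _ = a.Status() // AlertFiring while EndsAt is zero
+ a.EndsAt = time.Now().Add(-time.Minute)
+ return a.Status() // AlertResolved
+}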
diff --git a/vendor/src/github.com/prometheus/common/model/alert_test.go b/vendor/src/github.com/prometheus/common/model/alert_test.go
new file mode 100644
index 00000000..9692bca2
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/alert_test.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestAlertValidate(t *testing.T) {
+ ts := time.Now()
+
+ var cases = []struct {
+ alert *Alert
+ err string
+ }{
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ },
+ err: "start time missing",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts,
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts.Add(1 * time.Minute),
+ },
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ StartsAt: ts,
+ EndsAt: ts.Add(-1 * time.Minute),
+ },
+ err: "start time must be before end time",
+ },
+ {
+ alert: &Alert{
+ StartsAt: ts,
+ },
+ err: "at least one label pair required",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b", "!bad": "label"},
+ StartsAt: ts,
+ },
+ err: "invalid label set: invalid name",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b", "bad": "\xfflabel"},
+ StartsAt: ts,
+ },
+ err: "invalid label set: invalid value",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ Annotations: LabelSet{"!bad": "label"},
+ StartsAt: ts,
+ },
+ err: "invalid annotations: invalid name",
+ },
+ {
+ alert: &Alert{
+ Labels: LabelSet{"a": "b"},
+ Annotations: LabelSet{"bad": "\xfflabel"},
+ StartsAt: ts,
+ },
+ err: "invalid annotations: invalid value",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.alert.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/fingerprinting.go b/vendor/src/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 00000000..fc4de410
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprints subject to a natural
+// sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
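+
+// roundTripFingerprint is a sketch (editor's addition, not upstream code)
+// showing that String and ParseFingerprint are inverses: a fingerprint is
+// rendered as exactly 16 hex digits and parses back to the same value.
+func roundTripFingerprint(f Fingerprint) (Fingerprint, error) {
+ return ParseFingerprint(f.String())
+}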
diff --git a/vendor/src/github.com/prometheus/common/model/fnv.go b/vendor/src/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 00000000..038fc1c9
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
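+
+// hashString is a convenience sketch (editor's addition, not upstream code)
+// showing how the primitives compose: seeding with hashNew and folding in a
+// string with hashAdd yields the full fnv64a hash of that string.
+func hashString(s string) uint64 {
+ return hashAdd(hashNew(), s)
+}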
diff --git a/vendor/src/github.com/prometheus/common/model/labels.go b/vendor/src/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..41051a01
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelName(s).IsValid() {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
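+
+// labelNameValidity is a sketch (editor's addition, not upstream code):
+// "__name__" is reserved by convention but still syntactically valid, while
+// "0bad" fails because a label name must not start with a digit.
+func labelNameValidity() (bool, bool) {
+ return LabelName("__name__").IsValid(), LabelName("0bad").IsValid() // true, false
+}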
diff --git a/vendor/src/github.com/prometheus/common/model/labels_test.go b/vendor/src/github.com/prometheus/common/model/labels_test.go
new file mode 100644
index 00000000..e8df28ff
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/labels_test.go
@@ -0,0 +1,140 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+ "testing"
+)
+
+func testLabelNames(t testing.TB) {
+ var scenarios = []struct {
+ in LabelNames
+ out LabelNames
+ }{
+ {
+ in: LabelNames{"ZZZ", "zzz"},
+ out: LabelNames{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelNames{"aaa", "AAA"},
+ out: LabelNames{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelNames(t *testing.T) {
+ testLabelNames(t)
+}
+
+func BenchmarkLabelNames(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelNames(b)
+ }
+}
+
+func testLabelValues(t testing.TB) {
+ var scenarios = []struct {
+ in LabelValues
+ out LabelValues
+ }{
+ {
+ in: LabelValues{"ZZZ", "zzz"},
+ out: LabelValues{"ZZZ", "zzz"},
+ },
+ {
+ in: LabelValues{"aaa", "AAA"},
+ out: LabelValues{"AAA", "aaa"},
+ },
+ }
+
+ for i, scenario := range scenarios {
+ sort.Sort(scenario.in)
+
+ for j, expected := range scenario.out {
+ if expected != scenario.in[j] {
+ t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
+ }
+ }
+ }
+}
+
+func TestLabelValues(t *testing.T) {
+ testLabelValues(t)
+}
+
+func BenchmarkLabelValues(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testLabelValues(b)
+ }
+}
+
+func TestLabelNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ ln LabelName
+ valid bool
+ }{
+ {
+ ln: "Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "1valid_23name",
+ valid: false,
+ },
+ {
+ ln: "avalid_23name",
+ valid: true,
+ },
+ {
+ ln: "Ava:lid_23name",
+ valid: false,
+ },
+ {
+ ln: "a lid_23name",
+ valid: false,
+ },
+ {
+ ln: ":leading_colon",
+ valid: false,
+ },
+ {
+ ln: "colon:in:the:middle",
+ valid: false,
+ },
+ }
+
+ for _, s := range scenarios {
+ if s.ln.IsValid() != s.valid {
+ t.Errorf("Expected %v for %q using IsValid method", s.valid, s.ln)
+ }
+ if LabelNameRE.MatchString(string(s.ln)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/labelset.go b/vendor/src/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 00000000..6eda08a7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not. All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: if the label does not exist at all in ls, then ls
+// is before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+ for l, v := range l {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !ln.IsValid() {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
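+
+// mergeSketch is a sketch (editor's addition, not upstream code): Merge is
+// non-destructive, and on conflicting names the argument's value wins over
+// the receiver's.
+func mergeSketch() LabelSet {
+ base := LabelSet{"job": "api", "env": "dev"}
+ return base.Merge(LabelSet{"env": "prod"}) // {env="prod", job="api"}; base unchanged
+}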
diff --git a/vendor/src/github.com/prometheus/common/model/metric.go b/vendor/src/github.com/prometheus/common/model/metric.go
new file mode 100644
index 00000000..f7250909
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,103 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := make(Metric, len(m))
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
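+
+// metricStringSketch is a sketch (editor's addition, not upstream code): the
+// __name__ label is rendered outside the braces and the remaining labels are
+// sorted, so String produces a stable representation.
+func metricStringSketch() string {
+ m := Metric{MetricNameLabel: "http_requests_total", "method": "GET", "code": "200"}
+ return m.String() // http_requests_total{code="200", method="GET"}
+}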
diff --git a/vendor/src/github.com/prometheus/common/model/metric_test.go b/vendor/src/github.com/prometheus/common/model/metric_test.go
new file mode 100644
index 00000000..06f9de52
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/metric_test.go
@@ -0,0 +1,132 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "testing"
+
+func testMetric(t testing.TB) {
+ var scenarios = []struct {
+ input LabelSet
+ fingerprint Fingerprint
+ fastFingerprint Fingerprint
+ }{
+ {
+ input: LabelSet{},
+ fingerprint: 14695981039346656037,
+ fastFingerprint: 14695981039346656037,
+ },
+ {
+ input: LabelSet{
+ "first_name": "electro",
+ "occupation": "robot",
+ "manufacturer": "westinghouse",
+ },
+ fingerprint: 5911716720268894962,
+ fastFingerprint: 11310079640881077873,
+ },
+ {
+ input: LabelSet{
+ "x": "y",
+ },
+ fingerprint: 8241431561484471700,
+ fastFingerprint: 13948396922932177635,
+ },
+ {
+ input: LabelSet{
+ "a": "bb",
+ "b": "c",
+ },
+ fingerprint: 3016285359649981711,
+ fastFingerprint: 3198632812309449502,
+ },
+ {
+ input: LabelSet{
+ "a": "b",
+ "bb": "c",
+ },
+ fingerprint: 7122421792099404749,
+ fastFingerprint: 5774953389407657638,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ input := Metric(scenario.input)
+
+ if scenario.fingerprint != input.Fingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint())
+ }
+ if scenario.fastFingerprint != input.FastFingerprint() {
+ t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint())
+ }
+ }
+}
+
+func TestMetric(t *testing.T) {
+ testMetric(t)
+}
+
+func BenchmarkMetric(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ testMetric(b)
+ }
+}
+
+func TestMetricNameIsValid(t *testing.T) {
+ var scenarios = []struct {
+ mn LabelValue
+ valid bool
+ }{
+ {
+ mn: "Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "_Avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "1valid_23name",
+ valid: false,
+ },
+ {
+ mn: "avalid_23name",
+ valid: true,
+ },
+ {
+ mn: "Ava:lid_23name",
+ valid: true,
+ },
+ {
+ mn: "a lid_23name",
+ valid: false,
+ },
+ {
+ mn: ":leading_colon",
+ valid: true,
+ },
+ {
+ mn: "colon:in:the:middle",
+ valid: true,
+ },
+ }
+
+ for _, s := range scenarios {
+ if IsValidMetricName(s.mn) != s.valid {
+ t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn)
+ }
+ if MetricNameRE.MatchString(string(s.mn)) != s.valid {
+ t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/model.go b/vendor/src/github.com/prometheus/common/model/model.go
new file mode 100644
index 00000000..a7b96917
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/src/github.com/prometheus/common/model/signature.go b/vendor/src/github.com/prometheus/common/model/signature.go
new file mode 100644
index 00000000..8762b13c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more susceptible
+// to hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
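+
+// signatureOrderSketch is a sketch (editor's addition, not upstream code):
+// LabelsToSignature sorts the label names before hashing, so the insertion
+// order of the map entries cannot change the resulting signature.
+func signatureOrderSketch() bool {
+ a := LabelsToSignature(map[string]string{"job": "api", "env": "prod"})
+ b := LabelsToSignature(map[string]string{"env": "prod", "job": "api"})
+ return a == b // always true
+}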
diff --git a/vendor/src/github.com/prometheus/common/model/signature_test.go b/vendor/src/github.com/prometheus/common/model/signature_test.go
new file mode 100644
index 00000000..d59c8a8c
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/signature_test.go
@@ -0,0 +1,314 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+func TestLabelsToSignature(t *testing.T) {
+ var scenarios = []struct {
+ in map[string]string
+ out uint64
+ }{
+ {
+ in: map[string]string{},
+ out: 14695981039346656037,
+ },
+ {
+ in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := LabelsToSignature(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestMetricToFastFingerprint(t *testing.T) {
+ var scenarios = []struct {
+ in LabelSet
+ out Fingerprint
+ }{
+ {
+ in: LabelSet{},
+ out: 14695981039346656037,
+ },
+ {
+ in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
+ out: 12952432476264840823,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := labelSetToFastFingerprint(scenario.in)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureForLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels LabelNames
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"empty"},
+ out: 7187873163539638612,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: LabelNames{"fear", "name"},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: LabelNames{},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureForLabels(scenario.in, scenario.labels...)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func TestSignatureWithoutLabels(t *testing.T) {
+ var scenarios = []struct {
+ in Metric
+ labels map[LabelName]struct{}
+ out uint64
+ }{
+ {
+ in: Metric{},
+ labels: nil,
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
+ out: 14695981039346656037,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
+ labels: map[LabelName]struct{}{"foo": struct{}{}},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: map[LabelName]struct{}{},
+ out: 5799056148416392346,
+ },
+ {
+ in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
+ labels: nil,
+ out: 5799056148416392346,
+ },
+ }
+
+ for i, scenario := range scenarios {
+ actual := SignatureWithoutLabels(scenario.in, scenario.labels)
+
+ if actual != scenario.out {
+ t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
+ }
+ }
+}
+
+func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
+ for i := 0; i < b.N; i++ {
+ if a := LabelsToSignature(l); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
+ }
+ }
+}
+
+func BenchmarkLabelToSignatureScalar(b *testing.B) {
+ benchmarkLabelToSignature(b, nil, 14695981039346656037)
+}
+
+func BenchmarkLabelToSignatureSingle(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkLabelToSignatureDouble(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkLabelToSignatureTriple(b *testing.B) {
+ benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169)
+}
+
+func BenchmarkMetricToFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
+}
+
+func BenchmarkMetricToFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
+}
+
+func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
+ for i := 0; i < b.N; i++ {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+}
+
+func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
+}
+
+func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964)
+}
+
+func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
+}
+
+func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
+ benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
+}
+
+func BenchmarkEmptyLabelSignature(b *testing.B) {
+ input := []map[string]string{nil, {}}
+
+ var ms runtime.MemStats
+ runtime.ReadMemStats(&ms)
+
+ alloc := ms.Alloc
+
+ for _, labels := range input {
+ LabelsToSignature(labels)
+ }
+
+ runtime.ReadMemStats(&ms)
+
+ if got := ms.Alloc; alloc != got {
+ b.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
+ }
+}
+
+func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) {
+ var start, end sync.WaitGroup
+ start.Add(1)
+ end.Add(concLevel)
+
+ for i := 0; i < concLevel; i++ {
+ go func() {
+ start.Wait()
+ for j := b.N / concLevel; j >= 0; j-- {
+ if a := labelSetToFastFingerprint(ls); a != e {
+ b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
+ }
+ }
+ end.Done()
+ }()
+ }
+ b.ResetTimer()
+ start.Done()
+ end.Wait()
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
+}
+
+func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
+ benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
+}
diff --git a/vendor/src/github.com/prometheus/common/model/silence.go b/vendor/src/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..7538e299
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes a match on the value of a given label.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
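+
+// For illustration (label name and value here are hypothetical): a regex
+// matcher is valid only if its value compiles as a regular expression.
+//
+//	m := &Matcher{Name: "job", Value: "api|web", IsRegex: true}
+//	if err := m.Validate(); err != nil {
+//		// reject the matcher
+//	}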
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
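+
+// A sketch of a silence that passes Validate (all field values here are
+// hypothetical):
+//
+//	sil := &Silence{
+//		Matchers:  []*Matcher{{Name: "job", Value: "api"}},
+//		StartsAt:  time.Now(),
+//		EndsAt:    time.Now().Add(time.Hour),
+//		CreatedAt: time.Now(),
+//		CreatedBy: "ops",
+//		Comment:   "planned maintenance",
+//	}
+//	err := sil.Validate() // err == nil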
diff --git a/vendor/src/github.com/prometheus/common/model/silence_test.go b/vendor/src/github.com/prometheus/common/model/silence_test.go
new file mode 100644
index 00000000..8eaaf074
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/silence_test.go
@@ -0,0 +1,228 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMatcherValidate(t *testing.T) {
+ var cases = []struct {
+ matcher *Matcher
+ err string
+ }{
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value",
+ },
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value",
+ IsRegex: true,
+ },
+ },
+ {
+ matcher: &Matcher{
+ Name: "name!",
+ Value: "value",
+ },
+ err: "invalid name",
+ },
+ {
+ matcher: &Matcher{
+ Name: "",
+ Value: "value",
+ },
+ err: "invalid name",
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "value\xff",
+ },
+ err: "invalid value",
+ },
+ {
+ matcher: &Matcher{
+ Name: "name",
+ Value: "",
+ },
+ err: "invalid value",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.matcher.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
+
+func TestSilenceValidate(t *testing.T) {
+ ts := time.Now()
+
+ var cases = []struct {
+ sil *Silence
+ err string
+ }{
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value"},
+ {Name: "name", Value: "value", IsRegex: true},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts.Add(-1 * time.Minute),
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "start time must be before end time",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "end time missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "start time missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "!name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "invalid matcher",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ CreatedBy: "name",
+ },
+ err: "comment missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedBy: "name",
+ Comment: "comment",
+ },
+ err: "creation timestamp missing",
+ },
+ {
+ sil: &Silence{
+ Matchers: []*Matcher{
+ {Name: "name", Value: "value"},
+ },
+ StartsAt: ts,
+ EndsAt: ts,
+ CreatedAt: ts,
+ Comment: "comment",
+ },
+ err: "creator information missing",
+ },
+ }
+
+ for i, c := range cases {
+ err := c.sil.Validate()
+ if err == nil {
+ if c.err == "" {
+ continue
+ }
+ t.Errorf("%d. Expected error %q but got none", i, c.err)
+ continue
+ }
+ if c.err == "" && err != nil {
+ t.Errorf("%d. Expected no error but got %q", i, err)
+ continue
+ }
+ if !strings.Contains(err.Error(), c.err) {
+ t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/time.go b/vendor/src/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..548968ae
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the number of minimum ticks in one second.
+	second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
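+
+// As an illustration, all conversions keep millisecond resolution, so any
+// finer precision is truncated:
+//
+//	ts := TimeFromUnixNano(1136239445123456789) // 1136239445123 in ms
+//	ns := ts.UnixNano()                         // 1136239445123000000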
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
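+
+// For illustration: the format accepts a single integer with one unit
+// suffix, so compound strings are rejected.
+//
+//	d, err := ParseDuration("5m")   // time.Duration(d) == 5*time.Minute
+//	_, err = ParseDuration("1h30m") // err != nil: not a valid duration string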
+
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
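+
+// String picks the largest unit that divides the duration evenly; for
+// example, Duration(90*time.Second).String() yields "90s" (not "1m30s"),
+// and Duration(2*time.Hour).String() yields "2h".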
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/vendor/src/github.com/prometheus/common/model/time_test.go b/vendor/src/github.com/prometheus/common/model/time_test.go
new file mode 100644
index 00000000..45ffd872
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/time_test.go
@@ -0,0 +1,129 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "testing"
+ "time"
+)
+
+func TestComparators(t *testing.T) {
+ t1a := TimeFromUnix(0)
+ t1b := TimeFromUnix(0)
+ t2 := TimeFromUnix(2*second - 1)
+
+ if !t1a.Equal(t1b) {
+ t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
+ }
+ if t1a.Equal(t2) {
+ t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
+ }
+
+ if !t1a.Before(t2) {
+ t.Fatalf("Expected %s to be before %s", t1a, t2)
+ }
+ if t1a.Before(t1b) {
+ t.Fatalf("Expected %s to not be before %s", t1a, t1b)
+ }
+
+ if !t2.After(t1a) {
+ t.Fatalf("Expected %s to be after %s", t2, t1a)
+ }
+ if t1b.After(t1a) {
+ t.Fatalf("Expected %s to not be after %s", t1b, t1a)
+ }
+}
+
+func TestTimeConversions(t *testing.T) {
+ unixSecs := int64(1136239445)
+ unixNsecs := int64(123456789)
+ unixNano := unixSecs*1e9 + unixNsecs
+
+ t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
+ t2 := time.Unix(unixSecs, unixNsecs)
+
+ ts := TimeFromUnixNano(unixNano)
+ if !ts.Time().Equal(t1) {
+ t.Fatalf("Expected %s, got %s", t1, ts.Time())
+ }
+
+ // Test available precision.
+ ts = TimeFromUnixNano(t2.UnixNano())
+ if !ts.Time().Equal(t1) {
+ t.Fatalf("Expected %s, got %s", t1, ts.Time())
+ }
+
+ if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
+		t.Fatalf("Expected %d, got %d", unixNano-unixNano%nanosPerTick, ts.UnixNano())
+ }
+}
+
+func TestDuration(t *testing.T) {
+ duration := time.Second + time.Minute + time.Hour
+ goTime := time.Unix(1136239445, 0)
+
+ ts := TimeFromUnix(goTime.Unix())
+ if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
+ t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
+ }
+
+ earlier := ts.Add(-duration)
+ delta := ts.Sub(earlier)
+ if delta != duration {
+ t.Fatalf("Expected %s to be equal to %s", delta, duration)
+ }
+}
+
+func TestParseDuration(t *testing.T) {
+ var cases = []struct {
+ in string
+ out time.Duration
+ }{
+ {
+ in: "324ms",
+ out: 324 * time.Millisecond,
+ }, {
+ in: "3s",
+ out: 3 * time.Second,
+ }, {
+ in: "5m",
+ out: 5 * time.Minute,
+ }, {
+ in: "1h",
+ out: time.Hour,
+ }, {
+ in: "4d",
+ out: 4 * 24 * time.Hour,
+ }, {
+ in: "3w",
+ out: 3 * 7 * 24 * time.Hour,
+ }, {
+ in: "10y",
+ out: 10 * 365 * 24 * time.Hour,
+ },
+ }
+
+ for _, c := range cases {
+ d, err := ParseDuration(c.in)
+ if err != nil {
+ t.Errorf("Unexpected error on input %q", c.in)
+ }
+ if time.Duration(d) != c.out {
+ t.Errorf("Expected %v but got %v", c.out, d)
+ }
+ if d.String() != c.in {
+ t.Errorf("Expected duration string %q but got %q", c.in, d.String())
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/common/model/value.go b/vendor/src/github.com/prometheus/common/model/value.go
new file mode 100644
index 00000000..7728abae
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/value.go
@@ -0,0 +1,419 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+ // of 0, which is possible to appear in a real Sample and thus not suitable
+ // to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
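+
+// Callers can thus signal "no sample (pair) found" by returning these
+// values and detect that case with a plain comparison, e.g.
+// p == ZeroSamplePair.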
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
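+
+// For example, SampleValue(math.NaN()).Equal(SampleValue(math.NaN())) is
+// true, while the corresponding == comparison is false.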
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
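+
+// The resulting wire format is a two-element array holding the timestamp
+// in seconds and the value as a quoted string, for example:
+//
+//	b, _ := json.Marshal(SamplePair{Timestamp: 1234567, Value: 123.1})
+//	// string(b) == `[1234.567,"123.1"]`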
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+	if !s.Value.Equal(o.Value) {
+ return false
+ }
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
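+// ValueType labels the concrete type of a Value.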
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
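+// UnmarshalJSON implements json.Unmarshaler.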
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+ v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+ return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is effectively an alias for Samples, with the added
+// contract that in a Vector, all Samples have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/vendor/src/github.com/prometheus/common/model/value_test.go b/vendor/src/github.com/prometheus/common/model/value_test.go
new file mode 100644
index 00000000..8d2b69ea
--- /dev/null
+++ b/vendor/src/github.com/prometheus/common/model/value_test.go
@@ -0,0 +1,417 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "math"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestEqual(t *testing.T) {
+ tests := map[string]struct {
+ in1, in2 SampleValue
+ want bool
+ }{
+ "equal floats": {
+ in1: 3.14,
+ in2: 3.14,
+ want: true,
+ },
+ "unequal floats": {
+ in1: 3.14,
+ in2: 3.1415,
+ want: false,
+ },
+		"positive infinities": {
+ in1: SampleValue(math.Inf(+1)),
+ in2: SampleValue(math.Inf(+1)),
+ want: true,
+ },
+		"negative infinities": {
+ in1: SampleValue(math.Inf(-1)),
+ in2: SampleValue(math.Inf(-1)),
+ want: true,
+ },
+		"different infinities": {
+ in1: SampleValue(math.Inf(+1)),
+ in2: SampleValue(math.Inf(-1)),
+ want: false,
+ },
+ "number and infinity": {
+ in1: 42,
+ in2: SampleValue(math.Inf(+1)),
+ want: false,
+ },
+ "number and NaN": {
+ in1: 42,
+ in2: SampleValue(math.NaN()),
+ want: false,
+ },
+ "NaNs": {
+ in1: SampleValue(math.NaN()),
+ in2: SampleValue(math.NaN()),
+			want: true, // Equal deliberately treats two NaNs as equal, unlike ==.
+ },
+ }
+
+ for name, test := range tests {
+ got := test.in1.Equal(test.in2)
+ if got != test.want {
+ t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want)
+ }
+ }
+}
+
+func TestSamplePairJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value SamplePair
+ }{
+ {
+ plain: `[1234.567,"123.1"]`,
+ value: SamplePair{
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sp SamplePair
+ err = json.Unmarshal(b, &sp)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sp != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sp)
+ }
+ }
+}
+
+func TestSampleJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Sample
+ }{
+ {
+ plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`,
+ value: Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv Sample
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if !reflect.DeepEqual(sv, test.value) {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestVectorJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Vector
+ }{
+ {
+ plain: `[]`,
+ value: Vector{},
+ },
+ {
+ plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`,
+ value: Vector{&Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ }},
+ },
+ {
+ plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`,
+ value: Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "test_metric",
+ },
+ Value: 123.1,
+ Timestamp: 1234567,
+ },
+ &Sample{
+ Metric: Metric{
+ "foo": "bar",
+ },
+ Value: SampleValue(math.Inf(1)),
+ Timestamp: 1234,
+ },
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var vec Vector
+ err = json.Unmarshal(b, &vec)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if !reflect.DeepEqual(vec, test.value) {
+ t.Errorf("decoding error: expected %v, got %v", test.value, vec)
+ }
+ }
+}
+
+func TestScalarJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value Scalar
+ }{
+ {
+ plain: `[123.456,"456"]`,
+ value: Scalar{
+ Timestamp: 123456,
+ Value: 456,
+ },
+ },
+ {
+ plain: `[123123.456,"+Inf"]`,
+ value: Scalar{
+ Timestamp: 123123456,
+ Value: SampleValue(math.Inf(1)),
+ },
+ },
+ {
+ plain: `[123123.456,"-Inf"]`,
+ value: Scalar{
+ Timestamp: 123123456,
+ Value: SampleValue(math.Inf(-1)),
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv Scalar
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sv != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestStringJSON(t *testing.T) {
+ input := []struct {
+ plain string
+ value String
+ }{
+ {
+ plain: `[123.456,"test"]`,
+ value: String{
+ Timestamp: 123456,
+ Value: "test",
+ },
+ },
+ {
+ plain: `[123123.456,"台北"]`,
+ value: String{
+ Timestamp: 123123456,
+ Value: "台北",
+ },
+ },
+ }
+
+ for _, test := range input {
+ b, err := json.Marshal(test.value)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if string(b) != test.plain {
+ t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+ continue
+ }
+
+ var sv String
+ err = json.Unmarshal(b, &sv)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if sv != test.value {
+ t.Errorf("decoding error: expected %v, got %v", test.value, sv)
+ }
+ }
+}
+
+func TestVectorSort(t *testing.T) {
+ input := Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 2,
+ },
+ }
+
+ expected := Vector{
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "A",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "B",
+ },
+ Timestamp: 2,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 1,
+ },
+ &Sample{
+ Metric: Metric{
+ MetricNameLabel: "C",
+ },
+ Timestamp: 2,
+ },
+ }
+
+ sort.Sort(input)
+
+ for i, actual := range input {
+ actualFp := actual.Metric.Fingerprint()
+ expectedFp := expected[i].Metric.Fingerprint()
+
+ if actualFp != expectedFp {
+ t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
+ }
+
+ if actual.Timestamp != expected[i].Timestamp {
+ t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/AUTHORS.md b/vendor/src/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 00000000..d5586356
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,21 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian
+* Bjoern Rabenstein
+* David Cournapeau
+* Ji-Hoon, Seol
+* Jonas Große Sundrup
+* Julius Volz
+* Matt Layher
+* Matthias Rampke
+* Nicky Gerritsen
+* Rémi Audebert
+* Tobias Schmidt
diff --git a/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 00000000..5705f0fb
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/src/github.com/prometheus/procfs/LICENSE b/vendor/src/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/prometheus/procfs/Makefile b/vendor/src/github.com/prometheus/procfs/Makefile
new file mode 100644
index 00000000..c264a49d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ ! gofmt -l *.go | read nothing
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/src/github.com/prometheus/procfs/NOTICE b/vendor/src/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 00000000..53c5e9aa
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/src/github.com/prometheus/procfs/README.md b/vendor/src/github.com/prometheus/procfs/README.md
new file mode 100644
index 00000000..6e7ee6b8
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warning. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/src/github.com/prometheus/procfs/doc.go b/vendor/src/github.com/prometheus/procfs/doc.go
new file mode 100644
index 00000000..e2acd6d4
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/cmdline b/vendor/src/github.com/prometheus/procfs/fixtures/26231/cmdline
new file mode 100644
index 00000000..d2d8ef88
Binary files /dev/null and b/vendor/src/github.com/prometheus/procfs/fixtures/26231/cmdline differ
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/comm b/vendor/src/github.com/prometheus/procfs/fixtures/26231/comm
new file mode 100644
index 00000000..f027e0d4
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26231/comm
@@ -0,0 +1 @@
+vim
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/io b/vendor/src/github.com/prometheus/procfs/fixtures/26231/io
new file mode 100644
index 00000000..b6210a7a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26231/io
@@ -0,0 +1,7 @@
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/limits b/vendor/src/github.com/prometheus/procfs/fixtures/26231/limits
new file mode 100644
index 00000000..23c6b689
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26231/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/mountstats b/vendor/src/github.com/prometheus/procfs/fixtures/26231/mountstats
new file mode 100644
index 00000000..a665c33d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26231/mountstats
@@ -0,0 +1,19 @@
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+ opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+ age: 13968
+ caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+ nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+ sec: flavor=1,pseudoflavor=1
+ events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+ bytes: 1207640230 0 0 0 1210214218 0 295483 0
+ RPC iostats version: 1.0 p/v: 100003/4 (nfs)
+ xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+ per-op statistics
+ NULL: 0 0 0 0 0 0 0 0
+ READ: 1298 1298 0 207680 1210292152 6 79386 79407
+ WRITE: 0 0 0 0 0 0 0 0
+
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26231/stat b/vendor/src/github.com/prometheus/procfs/fixtures/26231/stat
new file mode 100644
index 00000000..438aaa9d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26231/stat
@@ -0,0 +1 @@
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26232/cmdline b/vendor/src/github.com/prometheus/procfs/fixtures/26232/cmdline
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26232/comm b/vendor/src/github.com/prometheus/procfs/fixtures/26232/comm
new file mode 100644
index 00000000..62361ca7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26232/comm
@@ -0,0 +1 @@
+ata_sff
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26232/limits b/vendor/src/github.com/prometheus/procfs/fixtures/26232/limits
new file mode 100644
index 00000000..3f9bf16a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26232/limits
@@ -0,0 +1,17 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 29436 29436 processes
+Max open files 1024 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 29436 29436 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/26232/stat b/vendor/src/github.com/prometheus/procfs/fixtures/26232/stat
new file mode 100644
index 00000000..321b1607
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/26232/stat
@@ -0,0 +1 @@
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/584/stat b/vendor/src/github.com/prometheus/procfs/fixtures/584/stat
new file mode 100644
index 00000000..65b9369d
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/584/stat
@@ -0,0 +1,2 @@
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/mdstat b/vendor/src/github.com/prometheus/procfs/fixtures/mdstat
new file mode 100644
index 00000000..4430bdee
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/mdstat
@@ -0,0 +1,26 @@
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+ 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+
+md127 : active raid1 sdi2[0] sdj2[1]
+ 312319552 blocks [2/2] [UU]
+
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+ 248896 blocks [2/2] [UU]
+
+md4 : inactive raid1 sda3[0] sdb3[1]
+ 4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+ 195310144 blocks [2/1] [U_]
+ [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+ 195310144 blocks [2/2] [UU]
+ [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+ 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+ bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices:
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs b/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs
new file mode 100644
index 00000000..6a6a97d7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs
@@ -0,0 +1,14 @@
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP C0A80016:0CEA wlc
+ -> C0A85216:0CEA Tunnel 100 248 2
+ -> C0A85318:0CEA Tunnel 100 248 2
+ -> C0A85315:0CEA Tunnel 100 248 1
+TCP C0A80039:0CEA wlc
+ -> C0A85416:0CEA Tunnel 0 0 0
+ -> C0A85215:0CEA Tunnel 100 1499 0
+ -> C0A83215:0CEA Tunnel 100 1498 0
+TCP C0A80037:0CEA wlc
+ -> C0A8321A:0CEA Tunnel 0 0 0
+ -> C0A83120:0CEA Tunnel 100 0 0
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats b/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats
new file mode 100644
index 00000000..c00724e0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/net/ip_vs_stats
@@ -0,0 +1,6 @@
+ Total Incoming Outgoing Incoming Outgoing
+ Conns Packets Packets Bytes Bytes
+ 16AA370 E33656E5 0 51D8C8883AB3 0
+
+ Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
+ 4 1FB3C 0 1282A8F 0
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/stat b/vendor/src/github.com/prometheus/procfs/fixtures/stat
new file mode 100644
index 00000000..dabb96f7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/stat
@@ -0,0 +1,16 @@
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 0
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/README b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/README
new file mode 100644
index 00000000..5cf184ea
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/README
@@ -0,0 +1,2 @@
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests.
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/abc b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/abc
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/def b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/def
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/ghi b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/ghi
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/uvw b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/uvw
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/xyz b/vendor/src/github.com/prometheus/procfs/fixtures/symlinktargets/xyz
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/src/github.com/prometheus/procfs/fs.go b/vendor/src/github.com/prometheus/procfs/fs.go
new file mode 100644
index 00000000..49aaab05
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,33 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
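+
+// A minimal usage sketch (assumes the default /proc mount; error handling
+// elided):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	statPath := fs.Path("net", "ip_vs_stats") // "/proc/net/ip_vs_stats"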
+
+// Path joins the given path elements onto the filesystem's mount point and
+// returns the resulting path.
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
diff --git a/vendor/src/github.com/prometheus/procfs/fs_test.go b/vendor/src/github.com/prometheus/procfs/fs_test.go
new file mode 100644
index 00000000..91f1c6c9
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/fs_test.go
@@ -0,0 +1,13 @@
+package procfs
+
+import "testing"
+
+func TestNewFS(t *testing.T) {
+ if _, err := NewFS("foobar"); err == nil {
+ t.Error("want NewFS to fail for non-existing mount point")
+ }
+
+ if _, err := NewFS("procfs.go"); err == nil {
+ t.Error("want NewFS to fail if mount point is not a directory")
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/ipvs.go b/vendor/src/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..e7012f73
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
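+
+// A minimal usage sketch (assumes IPVS support and the default /proc mount;
+// error handling and imports elided):
+//
+//	stats, _ := NewIPVSStats()
+//	fmt.Printf("connections: %d\n", stats.Connections)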
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual, real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual, real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
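+// parseIPVSBackendStatus parses the plain-text `ip_vs` table: header lines
+// are skipped, TCP/UDP lines set the current virtual service, and each "->"
+// line yields one IPVSBackendStatus entry for that service.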
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+ fields := strings.Fields(string(scanner.Text()))
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+ case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
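+// parseIPPort parses a kernel-formatted "IP:Port" pair, where the IP is an
+// 8-character (IPv4) or 32-character (IPv6) hex string and the port is a
+// 16-bit hex number; for example, "C0A80016:0CEA" is 192.168.0.22:3306.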
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/ipvs_test.go b/vendor/src/github.com/prometheus/procfs/ipvs_test.go
new file mode 100644
index 00000000..c836c23a
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/ipvs_test.go
@@ -0,0 +1,190 @@
+package procfs
+
+import (
+ "net"
+ "testing"
+)
+
+var (
+ expectedIPVSStats = IPVSStats{
+ Connections: 23765872,
+ IncomingPackets: 3811989221,
+ OutgoingPackets: 0,
+ IncomingBytes: 89991519156915,
+ OutgoingBytes: 0,
+ }
+ expectedIPVSBackendStatuses = []IPVSBackendStatus{
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.82.22"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 2,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.83.24"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 2,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.22"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.83.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 248,
+ InactConn: 1,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.84.22"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 0,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.82.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 1499,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.57"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.50.21"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 1498,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.55"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.50.26"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 0,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ IPVSBackendStatus{
+ LocalAddress: net.ParseIP("192.168.0.55"),
+ LocalPort: 3306,
+ RemoteAddress: net.ParseIP("192.168.49.32"),
+ RemotePort: 3306,
+ Proto: "TCP",
+ Weight: 100,
+ ActiveConn: 0,
+ InactConn: 0,
+ },
+ }
+)
+
+func TestIPVSStats(t *testing.T) {
+ stats, err := FS("fixtures").NewIPVSStats()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stats != expectedIPVSStats {
+ t.Errorf("want %+v, have %+v", expectedIPVSStats, stats)
+ }
+}
+
+func TestParseIPPort(t *testing.T) {
+ ip := net.ParseIP("192.168.0.22")
+ port := uint16(3306)
+
+ gotIP, gotPort, err := parseIPPort("C0A80016:0CEA")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(gotIP.Equal(ip) && port == gotPort) {
+ t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort)
+ }
+}
+
+func TestParseIPPortInvalid(t *testing.T) {
+ testcases := []string{
+ "",
+ "C0A80016",
+ "C0A800:1234",
+ "FOOBARBA:1234",
+ "C0A80016:0CEA:1234",
+ }
+
+ for _, s := range testcases {
+ ip, port, err := parseIPPort(s)
+ if ip != nil || port != uint16(0) || err == nil {
+ t.Errorf("Expected error for input %s, have ip = %s, port = %v, err = %v", s, ip, port, err)
+ }
+ }
+}
+
+func TestParseIPPortIPv6(t *testing.T) {
+ ip := net.ParseIP("dead:beef::1")
+ port := uint16(8080)
+
+ gotIP, gotPort, err := parseIPPort("DEADBEEF000000000000000000000001:1F90")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !(gotIP.Equal(ip) && port == gotPort) {
+ t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort)
+ }
+
+}
+
+func TestIPVSBackendStatus(t *testing.T) {
+ backendStats, err := FS("fixtures").NewIPVSBackendStatus()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := len(expectedIPVSBackendStatuses), len(backendStats); want != have {
+ t.Fatalf("want %d backend statuses, have %d", want, have)
+ }
+
+ for idx, expect := range expectedIPVSBackendStatuses {
+ if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) {
+ t.Errorf("want LocalAddress %s, have %s", expect.LocalAddress, backendStats[idx].LocalAddress)
+ }
+ if backendStats[idx].LocalPort != expect.LocalPort {
+ t.Errorf("want LocalPort %d, have %d", expect.LocalPort, backendStats[idx].LocalPort)
+ }
+ if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) {
+ t.Errorf("want RemoteAddress %s, have %s", expect.RemoteAddress, backendStats[idx].RemoteAddress)
+ }
+ if backendStats[idx].RemotePort != expect.RemotePort {
+ t.Errorf("want RemotePort %d, have %d", expect.RemotePort, backendStats[idx].RemotePort)
+ }
+ if backendStats[idx].Proto != expect.Proto {
+ t.Errorf("want Proto %s, have %s", expect.Proto, backendStats[idx].Proto)
+ }
+ if backendStats[idx].Weight != expect.Weight {
+ t.Errorf("want Weight %d, have %d", expect.Weight, backendStats[idx].Weight)
+ }
+ if backendStats[idx].ActiveConn != expect.ActiveConn {
+ t.Errorf("want ActiveConn %d, have %d", expect.ActiveConn, backendStats[idx].ActiveConn)
+ }
+ if backendStats[idx].InactConn != expect.InactConn {
+ t.Errorf("want InactConn %d, have %d", expect.InactConn, backendStats[idx].InactConn)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/mdstat.go b/vendor/src/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 00000000..d7a248c0
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+	// statuslineRE matches a status line such as
+	// "312319552 blocks [2/2] [UU]", capturing total blocks, total disks,
+	// and active disks.
+	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+	// buildlineRE captures the synced-blocks count from a recovery/resync
+	// progress line such as "(16775552/195310144)".
+	buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat file and returns a slice of MDStat structs
+// containing the relevant info.
+func (fs FS) ParseMDStat() ([]MDStat, error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+		// j is the index of the sync-progress line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+		// If the device is currently syncing, get the number of blocks synced
+		// so far; otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
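+
+// A minimal usage sketch (assumes /proc/mdstat exists; error handling and
+// imports elided):
+//
+//	fs, _ := NewFS(DefaultMountPoint)
+//	mdStates, _ := fs.ParseMDStat()
+//	for _, md := range mdStates {
+//		fmt.Printf("%s: %d/%d disks active\n", md.Name, md.DisksActive, md.DisksTotal)
+//	}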
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/mdstat_test.go b/vendor/src/github.com/prometheus/procfs/mdstat_test.go
new file mode 100644
index 00000000..ca5fe4d1
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/mdstat_test.go
@@ -0,0 +1,31 @@
+package procfs
+
+import (
+ "testing"
+)
+
+func TestMDStat(t *testing.T) {
+ mdStates, err := FS("fixtures").ParseMDStat()
+ if err != nil {
+ t.Fatalf("parsing of reference-file failed entirely: %s", err)
+ }
+
+ refs := map[string]MDStat{
+ "md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288},
+ "md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552},
+ "md0": MDStat{"md0", "active", 2, 2, 248896, 248896},
+ "md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648},
+ "md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552},
+ "md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552},
+ "md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424},
+ }
+
+ if want, have := len(refs), len(mdStates); want != have {
+ t.Errorf("want %d parsed md-devices, have %d", want, have)
+ }
+ for _, md := range mdStates {
+ if want, have := refs[md.Name], md; want != have {
+ t.Errorf("%s: want %v, have %v", md.Name, want, have)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/mountstats.go b/vendor/src/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 00000000..47ab0a74
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,552 @@
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10Len = 10
+ fieldTransport11Len = 13
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+	// If available, additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+	// between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
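+// for example: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4".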
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+		// When encountering "per-operation statistics", we must break out of
+		// this loop and parse them separately so that parsing terminates
+		// before reaching another device entry; that is why this 'if'
+		// statement is not just another switch case.
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+		// Skip the operation name and parse the remaining integer fields.
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ switch statVersion {
+ case statVersion10:
+ if len(ss) != fieldTransport10Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ if len(ss) != fieldTransport11Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+	// Allocate enough capacity for v1.1 stats, since the zero values of the
+	// v1.1-only fields are acceptable when parsing a v1.0 response.
+ ns := make([]uint64, 0, fieldTransport11Len)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSTransportStats{
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/mountstats_test.go b/vendor/src/github.com/prometheus/procfs/mountstats_test.go
new file mode 100644
index 00000000..e6570793
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/mountstats_test.go
@@ -0,0 +1,252 @@
+package procfs
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMountStats(t *testing.T) {
+ tests := []struct {
+ name string
+ s string
+ fs bool
+ mounts []*Mount
+ invalid bool
+ }{
+ {
+ name: "no devices",
+ s: `hello`,
+ },
+ {
+ name: "device has too few fields",
+ s: `device foo`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs BAD on / with fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted BAD / with fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted on / BAD fstype rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device incorrect format",
+ s: `device rootfs mounted on / with BAD rootfs`,
+ invalid: true,
+ },
+ {
+ name: "device rootfs cannot have stats",
+ s: `device rootfs mounted on / with fstype rootfs stats`,
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with too little info",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nhello",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad bytes",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nbytes: 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad events",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nevents: 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad per-op stats",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nper-op statistics\nFOO 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport version",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=foo\nxprt: tcp 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats version 1.0",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.0\nxprt: tcp 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ invalid: true,
+ },
+ {
+ name: "NFSv4 device with bad transport stats version 1.1",
+ s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp 0 0 0 0 0 0 0 0 0 0",
+ invalid: true,
+ },
+ {
+ name: "device rootfs OK",
+ s: `device rootfs mounted on / with fstype rootfs`,
+ mounts: []*Mount{{
+ Device: "rootfs",
+ Mount: "/",
+ Type: "rootfs",
+ }},
+ },
+ {
+ name: "NFSv3 device with minimal stats OK",
+ s: `device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.1`,
+ mounts: []*Mount{{
+ Device: "192.168.1.1:/srv",
+ Mount: "/mnt/nfs",
+ Type: "nfs",
+ Stats: &MountStatsNFS{
+ StatVersion: "1.1",
+ },
+ }},
+ },
+ {
+ name: "fixtures OK",
+ fs: true,
+ mounts: []*Mount{
+ {
+ Device: "rootfs",
+ Mount: "/",
+ Type: "rootfs",
+ },
+ {
+ Device: "sysfs",
+ Mount: "/sys",
+ Type: "sysfs",
+ },
+ {
+ Device: "proc",
+ Mount: "/proc",
+ Type: "proc",
+ },
+ {
+ Device: "/dev/sda1",
+ Mount: "/",
+ Type: "ext4",
+ },
+ {
+ Device: "192.168.1.1:/srv/test",
+ Mount: "/mnt/nfs/test",
+ Type: "nfs4",
+ Stats: &MountStatsNFS{
+ StatVersion: "1.1",
+ Age: 13968 * time.Second,
+ Bytes: NFSBytesStats{
+ Read: 1207640230,
+ ReadTotal: 1210214218,
+ ReadPages: 295483,
+ },
+ Events: NFSEventsStats{
+ InodeRevalidate: 52,
+ DnodeRevalidate: 226,
+ VFSOpen: 1,
+ VFSLookup: 13,
+ VFSAccess: 398,
+ VFSReadPages: 331,
+ VFSWritePages: 47,
+ VFSFlush: 77,
+ VFSFileRelease: 77,
+ },
+ Operations: []NFSOperationStats{
+ {
+ Operation: "NULL",
+ },
+ {
+ Operation: "READ",
+ Requests: 1298,
+ Transmissions: 1298,
+ BytesSent: 207680,
+ BytesReceived: 1210292152,
+ CumulativeQueueTime: 6 * time.Millisecond,
+ CumulativeTotalResponseTime: 79386 * time.Millisecond,
+ CumulativeTotalRequestTime: 79407 * time.Millisecond,
+ },
+ {
+ Operation: "WRITE",
+ },
+ },
+ Transport: NFSTransportStats{
+ Port: 832,
+ Connect: 1,
+ IdleTime: 11 * time.Second,
+ Sends: 6428,
+ Receives: 6428,
+ CumulativeActiveRequests: 12154,
+ MaximumRPCSlotsUsed: 24,
+ CumulativeSendingQueue: 26,
+ CumulativePendingQueue: 5726,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ t.Logf("[%02d] test %q", i, tt.name)
+
+ var mounts []*Mount
+ var err error
+
+ if tt.s != "" {
+ mounts, err = parseMountStats(strings.NewReader(tt.s))
+ }
+ if tt.fs {
+			proc, e := FS("fixtures").NewProc(26231)
+			if e != nil {
+				t.Fatalf("failed to create proc: %v", e)
+ }
+
+ mounts, err = proc.MountStats()
+ }
+
+ if tt.invalid && err == nil {
+ t.Error("expected an error, but none occurred")
+ }
+ if !tt.invalid && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+
+ if want, have := tt.mounts, mounts; !reflect.DeepEqual(want, have) {
+ t.Errorf("mounts:\nwant:\n%v\nhave:\n%v", mountsStr(want), mountsStr(have))
+ }
+ }
+}
+
+func mountsStr(mounts []*Mount) string {
+ var out string
+ for i, m := range mounts {
+ out += fmt.Sprintf("[%d] %q on %q (%q)", i, m.Device, m.Mount, m.Type)
+
+ stats, ok := m.Stats.(*MountStatsNFS)
+ if !ok {
+ out += "\n"
+ continue
+ }
+
+ out += fmt.Sprintf("\n\t- v%s, age: %s", stats.StatVersion, stats.Age)
+ out += fmt.Sprintf("\n\t- bytes: %v", stats.Bytes)
+ out += fmt.Sprintf("\n\t- events: %v", stats.Events)
+ out += fmt.Sprintf("\n\t- transport: %v", stats.Transport)
+		out += "\n\t- per-operation stats:"
+
+ for _, o := range stats.Operations {
+ out += fmt.Sprintf("\n\t\t- %v", o)
+ }
+
+ out += "\n"
+ }
+
+ return out
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc.go b/vendor/src/github.com/prometheus/procfs/proc.go
new file mode 100644
index 00000000..8717e1fe
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
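+
+// A minimal usage sketch (assumes a mounted /proc; error handling and
+// imports elided):
+//
+//	procs, _ := AllProcs()
+//	for _, p := range procs {
+//		comm, _ := p.Comm()
+//		fmt.Printf("%d: %s\n", p.PID, comm)
+//	}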
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
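+	// The link target is the PID directory of the current process; strip the
+	// mount-point prefix, if present, to leave the numeric PID.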
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
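+	// The cmdline file is NUL-separated and NUL-terminated; drop the trailing
+	// NUL and split on the remaining separators.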
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_io.go b/vendor/src/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000..b4e31d7b
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/[pid]/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
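+//
+// A hedged usage sketch (error handling and imports elided):
+//
+//	p, _ := NewProc(os.Getpid())
+//	pio, _ := p.NewIO()
+//	fmt.Printf("read: %dB, written: %dB\n", pio.ReadBytes, pio.WriteBytes)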
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_io_test.go b/vendor/src/github.com/prometheus/procfs/proc_io_test.go
new file mode 100644
index 00000000..3aa1a129
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_io_test.go
@@ -0,0 +1,33 @@
+package procfs
+
+import "testing"
+
+func TestProcIO(t *testing.T) {
+ p, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := p.NewIO()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ name string
+ want int64
+ have int64
+ }{
+ {name: "RChar", want: 750339, have: int64(s.RChar)},
+ {name: "WChar", want: 818609, have: int64(s.WChar)},
+ {name: "SyscR", want: 7405, have: int64(s.SyscR)},
+ {name: "SyscW", want: 5245, have: int64(s.SyscW)},
+ {name: "ReadBytes", want: 1024, have: int64(s.ReadBytes)},
+ {name: "WriteBytes", want: 2048, have: int64(s.WriteBytes)},
+ {name: "CancelledWriteBytes", want: -1024, have: s.CancelledWriteBytes},
+ } {
+ if test.want != test.have {
+ t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_limits.go b/vendor/src/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 00000000..2df997ce
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int
+ // Maximum size of files that the process may create.
+ FileSize int
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int
+ // Maximum size of the process stack in bytes.
+ StackSize int
+ // Maximum size of a core file.
+ CoreFileSize int
+ // Limit of the process's resident set in pages.
+ ResidentSet int
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
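
Because `parseInt` maps the literal `unlimited` to `-1`, callers must check for negative values before treating a limit as a real bound. A sketch:

```go
package main

import (
    "fmt"
    "log"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    p, err := fs.NewProc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }

    l, err := p.NewLimits()
    if err != nil {
        log.Fatal(err)
    }
    if l.OpenFiles < 0 {
        fmt.Println("open files: unlimited") // -1 encodes "unlimited"
    } else {
        fmt.Println("open files:", l.OpenFiles)
    }
}
```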
diff --git a/vendor/src/github.com/prometheus/procfs/proc_limits_test.go b/vendor/src/github.com/prometheus/procfs/proc_limits_test.go
new file mode 100644
index 00000000..70bf04ec
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_limits_test.go
@@ -0,0 +1,31 @@
+package procfs
+
+import "testing"
+
+func TestNewLimits(t *testing.T) {
+ p, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ l, err := p.NewLimits()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ name string
+ want int
+ have int
+ }{
+ {name: "cpu time", want: -1, have: l.CPUTime},
+ {name: "open files", want: 2048, have: l.OpenFiles},
+ {name: "msgqueue size", want: 819200, have: l.MsqqueueSize},
+ {name: "nice priority", want: 0, have: l.NicePriority},
+ {name: "address space", want: -1, have: l.AddressSpace},
+ } {
+ if test.want != test.have {
+ t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+ }
+ }
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_stat.go b/vendor/src/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 00000000..724e271b
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, expressed in clock
+ // ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process's start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
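
The helper methods convert the raw units: `CPUTime` divides jiffies by the hardcoded `userHZ`, `ResidentMemory` multiplies pages by the page size, and `StartTime` combines the tick count with the boot time from /proc/stat. A usage sketch (not part of the vendored files):

```go
package main

import (
    "fmt"
    "log"
    "os"
    "time"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    p, err := fs.NewProc(os.Getpid())
    if err != nil {
        log.Fatal(err)
    }

    s, err := p.NewStat()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("cpu=%.2fs rss=%dB\n", s.CPUTime(), s.ResidentMemory())

    start, err := s.StartTime() // reads the boot time from /proc/stat
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("started:", time.Unix(int64(start), 0)) // fractional seconds truncated
}
```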
diff --git a/vendor/src/github.com/prometheus/procfs/proc_stat_test.go b/vendor/src/github.com/prometheus/procfs/proc_stat_test.go
new file mode 100644
index 00000000..a2ebcde7
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_stat_test.go
@@ -0,0 +1,110 @@
+package procfs
+
+import (
+ "os"
+ "testing"
+)
+
+func TestProcStat(t *testing.T) {
+ p, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := p.NewStat()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, test := range []struct {
+ name string
+ want int
+ have int
+ }{
+ {name: "pid", want: 26231, have: s.PID},
+ {name: "user time", want: 1677, have: int(s.UTime)},
+ {name: "system time", want: 44, have: int(s.STime)},
+ {name: "start time", want: 82375, have: int(s.Starttime)},
+ {name: "virtual memory size", want: 56274944, have: s.VSize},
+ {name: "resident set size", want: 1981, have: s.RSS},
+ } {
+ if test.want != test.have {
+ t.Errorf("want %s %d, have %d", test.name, test.want, test.have)
+ }
+ }
+}
+
+func TestProcStatComm(t *testing.T) {
+ s1, err := testProcStat(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := "vim", s1.Comm; want != have {
+ t.Errorf("want comm %s, have %s", want, have)
+ }
+
+ s2, err := testProcStat(584)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := "(a b ) ( c d) ", s2.Comm; want != have {
+ t.Errorf("want comm %s, have %s", want, have)
+ }
+}
+
+func TestProcStatVirtualMemory(t *testing.T) {
+ s, err := testProcStat(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := 56274944, s.VirtualMemory(); want != have {
+ t.Errorf("want virtual memory %d, have %d", want, have)
+ }
+}
+
+func TestProcStatResidentMemory(t *testing.T) {
+ s, err := testProcStat(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := 1981*os.Getpagesize(), s.ResidentMemory(); want != have {
+ t.Errorf("want resident memory %d, have %d", want, have)
+ }
+}
+
+func TestProcStatStartTime(t *testing.T) {
+ s, err := testProcStat(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time, err := s.StartTime()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := 1418184099.75, time; want != have {
+ t.Errorf("want start time %f, have %f", want, have)
+ }
+}
+
+func TestProcStatCPUTime(t *testing.T) {
+ s, err := testProcStat(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := 17.21, s.CPUTime(); want != have {
+ t.Errorf("want cpu time %f, have %f", want, have)
+ }
+}
+
+func testProcStat(pid int) (ProcStat, error) {
+ p, err := FS("fixtures").NewProc(pid)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return p.NewStat()
+}
diff --git a/vendor/src/github.com/prometheus/procfs/proc_test.go b/vendor/src/github.com/prometheus/procfs/proc_test.go
new file mode 100644
index 00000000..104b3245
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/proc_test.go
@@ -0,0 +1,160 @@
+package procfs
+
+import (
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestSelf(t *testing.T) {
+ fs := FS("fixtures")
+
+ p1, err := fs.NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p2, err := fs.Self()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(p1, p2) {
+ t.Errorf("want process %v, have %v", p1, p2)
+ }
+}
+
+func TestAllProcs(t *testing.T) {
+ procs, err := FS("fixtures").AllProcs()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Sort(procs)
+ for i, p := range []*Proc{{PID: 584}, {PID: 26231}} {
+ if want, have := p.PID, procs[i].PID; want != have {
+ t.Errorf("want processes %d, have %d", want, have)
+ }
+ }
+}
+
+func TestCmdLine(t *testing.T) {
+ for _, tt := range []struct {
+ process int
+ want []string
+ }{
+ {process: 26231, want: []string{"vim", "test.go", "+10"}},
+ {process: 26232, want: []string{}},
+ } {
+ p1, err := FS("fixtures").NewProc(tt.process)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c1, err := p1.CmdLine()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(tt.want, c1) {
+ t.Errorf("want cmdline %v, have %v", tt.want, c1)
+ }
+ }
+}
+
+func TestComm(t *testing.T) {
+ for _, tt := range []struct {
+ process int
+ want string
+ }{
+ {process: 26231, want: "vim"},
+ {process: 26232, want: "ata_sff"},
+ } {
+ p1, err := FS("fixtures").NewProc(tt.process)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c1, err := p1.Comm()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(tt.want, c1) {
+ t.Errorf("want comm %v, have %v", tt.want, c1)
+ }
+ }
+}
+
+func TestExecutable(t *testing.T) {
+ for _, tt := range []struct {
+ process int
+ want string
+ }{
+ {process: 26231, want: "/usr/bin/vim"},
+ {process: 26232, want: ""},
+ } {
+ p, err := FS("fixtures").NewProc(tt.process)
+ if err != nil {
+ t.Fatal(err)
+ }
+ exe, err := p.Executable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(tt.want, exe) {
+ t.Errorf("want absolute path to cmdline %v, have %v", tt.want, exe)
+ }
+ }
+}
+
+func TestFileDescriptors(t *testing.T) {
+ p1, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fds, err := p1.FileDescriptors()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Sort(byUintptr(fds))
+ if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) {
+ t.Errorf("want fds %v, have %v", want, fds)
+ }
+}
+
+func TestFileDescriptorTargets(t *testing.T) {
+ p1, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fds, err := p1.FileDescriptorTargets()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(fds)
+ var want = []string{
+ "../../symlinktargets/abc",
+ "../../symlinktargets/def",
+ "../../symlinktargets/ghi",
+ "../../symlinktargets/uvw",
+ "../../symlinktargets/xyz",
+ }
+ if !reflect.DeepEqual(want, fds) {
+ t.Errorf("want fds %v, have %v", want, fds)
+ }
+}
+
+func TestFileDescriptorsLen(t *testing.T) {
+ p1, err := FS("fixtures").NewProc(26231)
+ if err != nil {
+ t.Fatal(err)
+ }
+ l, err := p1.FileDescriptorsLen()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want, have := 5, l; want != have {
+ t.Errorf("want fds %d, have %d", want, have)
+ }
+}
+
+type byUintptr []uintptr
+
+func (a byUintptr) Len() int { return len(a) }
+func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] }
diff --git a/vendor/src/github.com/prometheus/procfs/stat.go b/vendor/src/github.com/prometheus/procfs/stat.go
new file mode 100644
index 00000000..1ca217e8
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns information about the current kernel/system statistics.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
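
When only the boot time is needed, the package-level constructor wraps the default mount point. A sketch:

```go
package main

import (
    "fmt"
    "log"
    "time"

    "github.com/prometheus/procfs"
)

func main() {
    s, err := procfs.NewStat() // scans /proc/stat for the btime line
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("booted:", time.Unix(s.BootTime, 0))
}
```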
diff --git a/vendor/src/github.com/prometheus/procfs/stat_test.go b/vendor/src/github.com/prometheus/procfs/stat_test.go
new file mode 100644
index 00000000..6eb79247
--- /dev/null
+++ b/vendor/src/github.com/prometheus/procfs/stat_test.go
@@ -0,0 +1,14 @@
+package procfs
+
+import "testing"
+
+func TestStat(t *testing.T) {
+ s, err := FS("fixtures").NewStat()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want, have := int64(1418183276), s.BootTime; want != have {
+ t.Errorf("want boot time %d, have %d", want, have)
+ }
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/src/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000..f2c2bc21
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/src/github.com/sirupsen/logrus/LICENSE b/vendor/src/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 00000000..f090cb42
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/src/github.com/sirupsen/logrus/README.md b/vendor/src/github.com/sirupsen/logrus/README.md
new file mode 100644
index 00000000..206c746c
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr, could also be a file.
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. You can, however, still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events (see the sketch after this list):
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
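+
+A sketch of what this looks like with the default `TextFormatter` and no TTY
+(your timestamp will differ):
+
+```go
+log.WithFields(log.Fields{
+  "event": "signup",
+}).Info("Processed request")
+
+// time="2015-09-07T08:48:33Z" level=info msg="Processed request" event=signup
+```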
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force non-colored output even when there is a TTY, set the
+ `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+
+// Then register it:
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can configure a logger's level, hooks, and formatter from a config file, so loggers are generated with different configurations in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which simply add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+ 1) logger.Out is protected by locks.
+
+ 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
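+
+A sketch of opting out under condition 2) — the log path here is purely
+illustrative:
+
+```go
+package main
+
+import (
+  "os"
+
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  f, err := os.OpenFile("/tmp/app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+  if err != nil {
+    log.Fatal(err)
+  }
+  defer f.Close()
+
+  logger := log.New()
+  logger.Out = f     // O_APPEND writes under 4k are atomic
+  logger.SetNoLock() // safe only under the conditions above
+  logger.Info("no extra locking needed")
+}
+```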
diff --git a/vendor/src/github.com/sirupsen/logrus/alt_exit.go b/vendor/src/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 00000000..b4c9e847
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler; call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. Example use cases include
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
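
The intended call pattern, as a minimal sketch (the handler body is illustrative):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
    logrus.RegisterExitHandler(func() {
        // flush buffers, close connections, send an alert, ...
    })
    logrus.Fatal("unrecoverable error") // runs every handler, then os.Exit(1)
}
```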
diff --git a/vendor/src/github.com/sirupsen/logrus/alt_exit_test.go b/vendor/src/github.com/sirupsen/logrus/alt_exit_test.go
new file mode 100644
index 00000000..022b7783
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/alt_exit_test.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "io/ioutil"
+ "os/exec"
+ "testing"
+ "time"
+)
+
+func TestRegister(t *testing.T) {
+ current := len(handlers)
+ RegisterExitHandler(func() {})
+ if len(handlers) != current+1 {
+ t.Fatalf("can't add handler")
+ }
+}
+
+func TestHandler(t *testing.T) {
+ gofile := "/tmp/testprog.go"
+ if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
+ t.Fatalf("can't create go file")
+ }
+
+ outfile := "/tmp/testprog.out"
+ arg := time.Now().UTC().String()
+ err := exec.Command("go", "run", gofile, outfile, arg).Run()
+ if err == nil {
+ t.Fatalf("completed normally, should have failed")
+ }
+
+ data, err := ioutil.ReadFile(outfile)
+ if err != nil {
+ t.Fatalf("can't read output file %s", outfile)
+ }
+
+ if string(data) != arg {
+ t.Fatalf("bad data")
+ }
+}
+
+var testprog = []byte(`
+// Test program for atexit, gets output file and data as arguments and writes
+// data to output file in atexit handler.
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "flag"
+ "fmt"
+ "io/ioutil"
+)
+
+var outfile = ""
+var data = ""
+
+func handler() {
+ ioutil.WriteFile(outfile, []byte(data), 0666)
+}
+
+func badHandler() {
+ n := 0
+ fmt.Println(1/n)
+}
+
+func main() {
+ flag.Parse()
+ outfile = flag.Arg(0)
+ data = flag.Arg(1)
+
+ logrus.RegisterExitHandler(handler)
+ logrus.RegisterExitHandler(badHandler)
+ logrus.Fatal("Bye bye")
+}
+`)
diff --git a/vendor/src/github.com/sirupsen/logrus/doc.go b/vendor/src/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 00000000..dddd5f87
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/src/github.com/sirupsen/logrus/entry.go b/vendor/src/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 00000000..4edbe7a2
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// ErrorKey defines the key used when adding errors with WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When the formatter is called in entry.log(), a Buffer may be set on the entry
+ Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/entry_test.go b/vendor/src/github.com/sirupsen/logrus/entry_test.go
new file mode 100644
index 00000000..99c3b41d
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/entry_test.go
@@ -0,0 +1,77 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestEntryWithError(t *testing.T) {
+
+ assert := assert.New(t)
+
+ defer func() {
+ ErrorKey = "error"
+ }()
+
+ err := fmt.Errorf("kaboom at layer %d", 4711)
+
+ assert.Equal(err, WithError(err).Data["error"])
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+
+ assert.Equal(err, entry.WithError(err).Data["error"])
+
+ ErrorKey = "err"
+
+ assert.Equal(err, entry.WithError(err).Data["err"])
+
+}
+
+func TestEntryPanicln(t *testing.T) {
+ errBoom := fmt.Errorf("boom time")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicln("kaboom")
+}
+
+func TestEntryPanicf(t *testing.T) {
+ errBoom := fmt.Errorf("boom again")
+
+ defer func() {
+ p := recover()
+ assert.NotNil(t, p)
+
+ switch pVal := p.(type) {
+ case *Entry:
+ assert.Equal(t, "kaboom true", pVal.Message)
+ assert.Equal(t, errBoom, pVal.Data["err"])
+ default:
+ t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
+ }
+ }()
+
+ logger := New()
+ logger.Out = &bytes.Buffer{}
+ entry := NewEntry(logger)
+ entry.WithField("err", errBoom).Panicf("kaboom %v", true)
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/examples/basic/basic.go b/vendor/src/github.com/sirupsen/logrus/examples/basic/basic.go
new file mode 100644
index 00000000..a1623ec0
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/examples/basic/basic.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.JSONFormatter)
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Level = logrus.DebugLevel
+}
+
+func main() {
+ defer func() {
+ err := recover()
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "err": err,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+ }
+ }()
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "number": 8,
+ }).Debug("Started observing beach")
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "temperature": -4,
+ }).Debug("Temperature changes")
+
+ log.WithFields(logrus.Fields{
+ "animal": "orca",
+ "size": 9009,
+ }).Panic("It's over 9000!")
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/examples/hook/hook.go b/vendor/src/github.com/sirupsen/logrus/examples/hook/hook.go
new file mode 100644
index 00000000..3187f6d3
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/examples/hook/hook.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+)
+
+var log = logrus.New()
+
+func init() {
+ log.Formatter = new(logrus.TextFormatter) // default
+ log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
+}
+
+func main() {
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(logrus.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/exported.go b/vendor/src/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 00000000..9a0120ac
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
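The helpers above all delegate to the shared `std` logger, so a program can configure logging once and then call the package-level functions from anywhere. A minimal sketch of that pattern (the field names and port value are illustrative only):

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Configure the shared standard logger once, up front.
	log.SetOutput(os.Stdout)
	log.SetLevel(log.DebugLevel)

	// WithFields only builds an *Entry; nothing is written until a level
	// method such as Info is called on it.
	log.WithFields(log.Fields{
		"event": "startup",
		"port":  8080,
	}).Info("listening")
}
```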
diff --git a/vendor/src/github.com/sirupsen/logrus/formatter.go b/vendor/src/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 00000000..b5fbe934
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return a byte slice which is then
+// written to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// prefixFieldClashes prevents silently overwriting the `time`, `msg` and
+// `level` fields when dumping an entry. Without it,
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would silently drop the user-provided level. With it, the entry is
+// logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way.
+// It's here to avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
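Anything implementing `Format(*Entry) ([]byte, error)` can be assigned to `Logger.Formatter`. A sketch of a custom formatter, assuming the hypothetical `MinimalFormatter` name and a "level: message key=value" layout:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

// MinimalFormatter renders an entry as "level: message" followed by each
// extra field as " key=value". Field order follows Go's map iteration and
// is therefore not deterministic; a real formatter would sort the keys.
type MinimalFormatter struct{}

func (f *MinimalFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	out := fmt.Sprintf("%s: %s", entry.Level, entry.Message)
	for k, v := range entry.Data {
		out += fmt.Sprintf(" %s=%v", k, v)
	}
	return []byte(out + "\n"), nil
}

func main() {
	log := logrus.New()
	log.Formatter = &MinimalFormatter{}
	log.WithField("animal", "walrus").Info("hello")
	// Output resembles: info: hello animal=walrus
}
```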
diff --git a/vendor/src/github.com/sirupsen/logrus/formatter_bench_test.go b/vendor/src/github.com/sirupsen/logrus/formatter_bench_test.go
new file mode 100644
index 00000000..c6d290c7
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/formatter_bench_test.go
@@ -0,0 +1,98 @@
+package logrus
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+// smallFields is a small size data set for benchmarking
+var smallFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+// largeFields is a large size data set for benchmarking
+var largeFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+ "five": "six",
+ "seven": "eight",
+ "nine": "ten",
+ "eleven": "twelve",
+ "thirteen": "fourteen",
+ "fifteen": "sixteen",
+ "seventeen": "eighteen",
+ "nineteen": "twenty",
+ "a": "b",
+ "c": "d",
+ "e": "f",
+ "g": "h",
+ "i": "j",
+ "k": "l",
+ "m": "n",
+ "o": "p",
+ "q": "r",
+ "s": "t",
+ "u": "v",
+ "w": "x",
+ "y": "z",
+ "this": "will",
+ "make": "thirty",
+ "entries": "yeah",
+}
+
+var errorFields = Fields{
+ "foo": fmt.Errorf("bar"),
+ "baz": fmt.Errorf("qux"),
+}
+
+func BenchmarkErrorTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
+}
+
+func BenchmarkSmallTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkLargeTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
+}
+
+func BenchmarkSmallColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
+}
+
+func BenchmarkLargeColoredTextFormatter(b *testing.B) {
+ doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
+}
+
+func BenchmarkSmallJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, smallFields)
+}
+
+func BenchmarkLargeJSONFormatter(b *testing.B) {
+ doBenchmark(b, &JSONFormatter{}, largeFields)
+}
+
+func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
+ entry := &Entry{
+ Time: time.Time{},
+ Level: InfoLevel,
+ Message: "message",
+ Data: fields,
+ }
+ var d []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ d, err = formatter.Format(entry)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(d)))
+ }
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/hook_test.go b/vendor/src/github.com/sirupsen/logrus/hook_test.go
new file mode 100644
index 00000000..13f34cb6
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hook_test.go
@@ -0,0 +1,122 @@
+package logrus
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type TestHook struct {
+ Fired bool
+}
+
+func (hook *TestHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *TestHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookFires(t *testing.T) {
+ hook := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ assert.Equal(t, hook.Fired, false)
+
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
+
+type ModifyHook struct {
+}
+
+func (hook *ModifyHook) Fire(entry *Entry) error {
+ entry.Data["wow"] = "whale"
+ return nil
+}
+
+func (hook *ModifyHook) Levels() []Level {
+ return []Level{
+ DebugLevel,
+ InfoLevel,
+ WarnLevel,
+ ErrorLevel,
+ FatalLevel,
+ PanicLevel,
+ }
+}
+
+func TestHookCanModifyEntry(t *testing.T) {
+ hook := new(ModifyHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ })
+}
+
+func TestCanFireMultipleHooks(t *testing.T) {
+ hook1 := new(ModifyHook)
+ hook2 := new(TestHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook1)
+ log.Hooks.Add(hook2)
+
+ log.WithField("wow", "elephant").Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["wow"], "whale")
+ assert.Equal(t, hook2.Fired, true)
+ })
+}
+
+type ErrorHook struct {
+ Fired bool
+}
+
+func (hook *ErrorHook) Fire(entry *Entry) error {
+ hook.Fired = true
+ return nil
+}
+
+func (hook *ErrorHook) Levels() []Level {
+ return []Level{
+ ErrorLevel,
+ }
+}
+
+func TestErrorHookShouldntFireOnInfo(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, false)
+ })
+}
+
+func TestErrorHookShouldFireOnError(t *testing.T) {
+ hook := new(ErrorHook)
+
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Hooks.Add(hook)
+ log.Error("test")
+ }, func(fields Fields) {
+ assert.Equal(t, hook.Fired, true)
+ })
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks.go b/vendor/src/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 00000000..3f151cdc
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging on the levels returned from `Levels()` on
+// your implementation of the interface. Note that hooks are fired
+// synchronously, not in a goroutine or via a worker channel; if a hook
+// does slow or blocking work and you don't want logging calls at those
+// levels to block, you must handle that concurrency yourself.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
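Implementing `Hook` only takes the two methods above, and because hooks fire synchronously a cheap in-process hook is safe; anything slow should hand work off to its own goroutine. A sketch, using a hypothetical hook that counts error-or-worse entries:

```go
package main

import (
	"github.com/Sirupsen/logrus"
)

// CountingHook counts entries logged at error level or worse. The counter
// is unsynchronized; this sketch assumes the logger's default locking
// serializes Fire calls.
type CountingHook struct {
	Count int
}

// Fire is called synchronously for every entry at a level in Levels().
func (h *CountingHook) Fire(entry *logrus.Entry) error {
	h.Count++
	return nil
}

// Levels restricts the hook to error, fatal and panic entries.
func (h *CountingHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

func main() {
	log := logrus.New()
	hook := &CountingHook{}
	log.Hooks.Add(hook)

	log.Error("boom")
	log.Infof("errors so far: %d", hook.Count) // 1 -- Info does not fire this hook
}
```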
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks/syslog/README.md b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/README.md
new file mode 100644
index 00000000..066704b3
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/README.md
@@ -0,0 +1,39 @@
+# Syslog Hooks for Logrus
+
+## Usage
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
+
+If you want to connect to the local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"), just pass empty strings as the first two parameters of `NewSyslogHook`. It should look like the following.
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+)
+
+func main() {
+ log := logrus.New()
+ hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+
+ if err == nil {
+ log.Hooks.Add(hook)
+ }
+}
+```
\ No newline at end of file
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog.go b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog.go
new file mode 100644
index 00000000..a36e2003
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog.go
@@ -0,0 +1,54 @@
+// +build !windows,!nacl,!plan9
+
+package logrus_syslog
+
+import (
+ "fmt"
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "os"
+)
+
+// SyslogHook to send logs via syslog.
+type SyslogHook struct {
+ Writer *syslog.Writer
+ SyslogNetwork string
+ SyslogRaddr string
+}
+
+// Creates a hook to be added to an instance of logger. This is called with
+// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
+// `if err == nil { log.Hooks.Add(hook) }`
+func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
+ w, err := syslog.Dial(network, raddr, priority, tag)
+ return &SyslogHook{w, network, raddr}, err
+}
+
+func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
+ line, err := entry.String()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
+ return err
+ }
+
+ switch entry.Level {
+ case logrus.PanicLevel:
+ return hook.Writer.Crit(line)
+ case logrus.FatalLevel:
+ return hook.Writer.Crit(line)
+ case logrus.ErrorLevel:
+ return hook.Writer.Err(line)
+ case logrus.WarnLevel:
+ return hook.Writer.Warning(line)
+ case logrus.InfoLevel:
+ return hook.Writer.Info(line)
+ case logrus.DebugLevel:
+ return hook.Writer.Debug(line)
+ default:
+ return nil
+ }
+}
+
+func (hook *SyslogHook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go
new file mode 100644
index 00000000..42762dc1
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go
@@ -0,0 +1,26 @@
+package logrus_syslog
+
+import (
+ "github.com/Sirupsen/logrus"
+ "log/syslog"
+ "testing"
+)
+
+func TestLocalhostAddAndPrint(t *testing.T) {
+ log := logrus.New()
+ hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+
+ if err != nil {
+ t.Errorf("Unable to connect to local syslog.")
+ }
+
+ log.Hooks.Add(hook)
+
+ for _, level := range hook.Levels() {
+ if len(log.Hooks[level]) != 1 {
+ t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
+ }
+ }
+
+ log.Info("Congratulations!")
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks/test/test.go b/vendor/src/github.com/sirupsen/logrus/hooks/test/test.go
new file mode 100644
index 00000000..06881253
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks/test/test.go
@@ -0,0 +1,67 @@
+package test
+
+import (
+ "io/ioutil"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// test.Hook is a hook designed for dealing with logs in test scenarios.
+type Hook struct {
+ Entries []*logrus.Entry
+}
+
+// NewGlobal installs a test hook for the global logger.
+func NewGlobal() *Hook {
+ hook := new(Hook)
+ logrus.AddHook(hook)
+ return hook
+}
+
+// NewLocal installs a test hook for a given local logger.
+func NewLocal(logger *logrus.Logger) *Hook {
+ hook := new(Hook)
+ logger.Hooks.Add(hook)
+ return hook
+}
+
+// NewNullLogger creates a discarding logger and installs the test hook.
+func NewNullLogger() (*logrus.Logger, *Hook) {
+ logger := logrus.New()
+ logger.Out = ioutil.Discard
+ return logger, NewLocal(logger)
+}
+
+func (t *Hook) Fire(e *logrus.Entry) error {
+ t.Entries = append(t.Entries, e)
+ return nil
+}
+
+func (t *Hook) Levels() []logrus.Level {
+ return logrus.AllLevels
+}
+
+// LastEntry returns the last entry that was logged, or nil if none was.
+func (t *Hook) LastEntry() *logrus.Entry {
+ if i := len(t.Entries) - 1; i >= 0 {
+ return t.Entries[i]
+ }
+ return nil
+}
+
+// Reset removes all Entries from this test hook.
+func (t *Hook) Reset() {
+ t.Entries = make([]*logrus.Entry, 0)
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/hooks/test/test_test.go b/vendor/src/github.com/sirupsen/logrus/hooks/test/test_test.go
new file mode 100644
index 00000000..d69455ba
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/hooks/test/test_test.go
@@ -0,0 +1,39 @@
+package test
+
+import (
+ "testing"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAllHooks(t *testing.T) {
+
+ assert := assert.New(t)
+
+ logger, hook := NewNullLogger()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ logger.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+ logger.Warn("Hello warning")
+ assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
+ assert.Equal("Hello warning", hook.LastEntry().Message)
+ assert.Equal(2, len(hook.Entries))
+
+ hook.Reset()
+ assert.Nil(hook.LastEntry())
+ assert.Equal(0, len(hook.Entries))
+
+ hook = NewGlobal()
+
+ logrus.Error("Hello error")
+ assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal("Hello error", hook.LastEntry().Message)
+ assert.Equal(1, len(hook.Entries))
+
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/json_formatter.go b/vendor/src/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 00000000..266554e9
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for various fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyLevel: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
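`FieldMap` only renames the three default keys; user-supplied fields pass through untouched. A sketch remapping them to `@`-prefixed names (the names themselves are arbitrary):

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
	})

	log.Info("hello")
	// Output resembles:
	// {"@level":"info","@message":"hello","@timestamp":"2017-01-01T00:00:00Z"}
}
```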
diff --git a/vendor/src/github.com/sirupsen/logrus/json_formatter_test.go b/vendor/src/github.com/sirupsen/logrus/json_formatter_test.go
new file mode 100644
index 00000000..51093a79
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/json_formatter_test.go
@@ -0,0 +1,199 @@
+package logrus
+
+import (
+ "encoding/json"
+ "errors"
+ "strings"
+ "testing"
+)
+
+func TestErrorNotLost(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["error"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["omg"] != "wild walrus" {
+ t.Fatal("Error field not set")
+ }
+}
+
+func TestFieldClashWithTime(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("time", "right now!"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.time"] != "right now!" {
+ t.Fatal("fields.time not set to original time field")
+ }
+
+ if entry["time"] != "0001-01-01T00:00:00Z" {
+ t.Fatal("time field not set to current time, was: ", entry["time"])
+ }
+}
+
+func TestFieldClashWithMsg(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("msg", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.msg"] != "something" {
+ t.Fatal("fields.msg not set to original msg field")
+ }
+}
+
+func TestFieldClashWithLevel(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ entry := make(map[string]interface{})
+ err = json.Unmarshal(b, &entry)
+ if err != nil {
+ t.Fatal("Unable to unmarshal formatted entry: ", err)
+ }
+
+ if entry["fields.level"] != "something" {
+ t.Fatal("fields.level not set to original level field")
+ }
+}
+
+func TestJSONEntryEndsWithNewline(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+
+ if b[len(b)-1] != '\n' {
+ t.Fatal("Expected JSON log entry to end with a newline")
+ }
+}
+
+func TestJSONMessageKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyMsg: "message",
+ },
+ }
+
+ b, err := formatter.Format(&Entry{Message: "oh hai"})
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
+ t.Fatal("Expected JSON to format message key")
+ }
+}
+
+func TestJSONLevelKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyLevel: "somelevel",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "somelevel") {
+ t.Fatal("Expected JSON to format level key")
+ }
+}
+
+func TestJSONTimeKey(t *testing.T) {
+ formatter := &JSONFormatter{
+ FieldMap: FieldMap{
+ FieldKeyTime: "timeywimey",
+ },
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, "timeywimey") {
+ t.Fatal("Expected JSON to format time key")
+ }
+}
+
+func TestJSONDisableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{
+ DisableTimestamp: true,
+ }
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if strings.Contains(s, FieldKeyTime) {
+ t.Error("Did not prevent timestamp", s)
+ }
+}
+
+func TestJSONEnableTimestamp(t *testing.T) {
+ formatter := &JSONFormatter{}
+
+ b, err := formatter.Format(WithField("level", "something"))
+ if err != nil {
+ t.Fatal("Unable to format entry: ", err)
+ }
+ s := string(b)
+ if !strings.Contains(s, FieldKeyTime) {
+ t.Error("Timestamp not present", s)
+ }
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/logger.go b/vendor/src/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 00000000..b769f3d3
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stderr`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development, when very verbose logging
+ // is wanted.
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// SetNoLock disables the logger's internal mutex. When a file is opened in
+// append mode, it's safe to write to it concurrently (for messages under 4k
+// on Linux), so in that case the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
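Putting `SetNoLock` together with the append-mode caveat above, a sketch of an unlocked file logger (the `app.log` path is illustrative):

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// O_APPEND writes of a small log line are atomic on Linux, so the
	// logger's internal mutex can be disabled for a modest speedup.
	f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	logger := log.New()
	logger.Out = f
	logger.SetNoLock()
	logger.Info("hello")
}
```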
diff --git a/vendor/src/github.com/sirupsen/logrus/logger_bench_test.go b/vendor/src/github.com/sirupsen/logrus/logger_bench_test.go
new file mode 100644
index 00000000..dd23a353
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/logger_bench_test.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+ "os"
+ "testing"
+)
+
+// loggerFields is a small size data set for benchmarking
+var loggerFields = Fields{
+ "foo": "bar",
+ "baz": "qux",
+ "one": "two",
+ "three": "four",
+}
+
+func BenchmarkDummyLogger(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func BenchmarkDummyLoggerNoLock(b *testing.B) {
+ nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ b.Fatalf("%v", err)
+ }
+ defer nullf.Close()
+ doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
+}
+
+func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
+
+func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
+ logger := Logger{
+ Out: out,
+ Level: InfoLevel,
+ Formatter: formatter,
+ }
+ logger.SetNoLock()
+ entry := logger.WithFields(fields)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ entry.Info("aaa")
+ }
+ })
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/logrus.go b/vendor/src/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 00000000..e5966911
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take: that way it
+// accepts both a stdlib logger and a logrus logger. There's no standard
+// interface for this; unfortunately, this is the closest we get.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
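`ParseLevel` pairs naturally with configuration read at startup, and `FieldLogger` lets the same code accept either a `*Logger` or an `*Entry`. A sketch of the former, assuming a hypothetical `LOG_LEVEL` environment variable:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// ParseLevel accepts the level names in any case ("debug", "WARN", ...).
	lvl, err := log.ParseLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		lvl = log.InfoLevel // fall back to the default level
	}
	log.SetLevel(lvl)

	log.Debug("only visible when LOG_LEVEL=debug")
}
```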
diff --git a/vendor/src/github.com/sirupsen/logrus/logrus_test.go b/vendor/src/github.com/sirupsen/logrus/logrus_test.go
new file mode 100644
index 00000000..bfc47805
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/logrus_test.go
@@ -0,0 +1,361 @@
+package logrus
+
+import (
+ "bytes"
+ "encoding/json"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ log(logger)
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assertions(fields)
+}
+
+func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
+ var buffer bytes.Buffer
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = &TextFormatter{
+ DisableColors: true,
+ }
+
+ log(logger)
+
+ fields := make(map[string]string)
+ for _, kv := range strings.Split(buffer.String(), " ") {
+ if !strings.Contains(kv, "=") {
+ continue
+ }
+ kvArr := strings.Split(kv, "=")
+ key := strings.TrimSpace(kvArr[0])
+ val := kvArr[1]
+ if kvArr[1][0] == '"' {
+ var err error
+ val, err = strconv.Unquote(val)
+ assert.NoError(t, err)
+ }
+ fields[key] = val
+ }
+ assertions(fields)
+}
+
+func TestPrint(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Print("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestInfo(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "info")
+ })
+}
+
+func TestWarn(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Warn("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["level"], "warning")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test test")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test 10")
+ })
+}
+
+func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Infoln(10, 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "10 10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", 10)
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test10")
+ })
+}
+
+func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.Info("test", "test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "testtest")
+ })
+}
+
+func TestWithFieldsShouldAllowAssignments(t *testing.T) {
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ localLog := logger.WithFields(Fields{
+ "key1": "value1",
+ })
+
+ localLog.WithField("key2", "value2").Info("test")
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ assert.Equal(t, "value2", fields["key2"])
+ assert.Equal(t, "value1", fields["key1"])
+
+ buffer = bytes.Buffer{}
+ fields = Fields{}
+ localLog.Info("test")
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.Nil(t, err)
+
+ _, ok := fields["key2"]
+ assert.Equal(t, false, ok)
+ assert.Equal(t, "value1", fields["key1"])
+}
+
+func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ })
+}
+
+func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("msg", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["msg"], "test")
+ assert.Equal(t, fields["fields.msg"], "hello")
+ })
+}
+
+func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("time", "hello").Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["fields.time"], "hello")
+ })
+}
+
+func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
+ LogAndAssertJSON(t, func(log *Logger) {
+ log.WithField("level", 1).Info("test")
+ }, func(fields Fields) {
+ assert.Equal(t, fields["level"], "info")
+ assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
+ })
+}
+
+func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
+ LogAndAssertText(t, func(log *Logger) {
+ ll := log.WithField("herp", "derp")
+ ll.Info("hello")
+ ll.Info("bye")
+ }, func(fields map[string]string) {
+ for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
+ if _, ok := fields[fieldName]; ok {
+ t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
+ }
+ }
+ })
+}
+
+func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
+
+ var buffer bytes.Buffer
+ var fields Fields
+
+ logger := New()
+ logger.Out = &buffer
+ logger.Formatter = new(JSONFormatter)
+
+ llog := logger.WithField("context", "eating raw fish")
+
+ llog.Info("looks delicious")
+
+ err := json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded first message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "looks delicious")
+ assert.Equal(t, fields["context"], "eating raw fish")
+
+ buffer.Reset()
+
+ llog.Warn("omg it is!")
+
+ err = json.Unmarshal(buffer.Bytes(), &fields)
+ assert.NoError(t, err, "should have decoded second message")
+ assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
+ assert.Equal(t, fields["msg"], "omg it is!")
+ assert.Equal(t, fields["context"], "eating raw fish")
+ assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
+
+}
+
+func TestConvertLevelToString(t *testing.T) {
+ assert.Equal(t, "debug", DebugLevel.String())
+ assert.Equal(t, "info", InfoLevel.String())
+ assert.Equal(t, "warning", WarnLevel.String())
+ assert.Equal(t, "error", ErrorLevel.String())
+ assert.Equal(t, "fatal", FatalLevel.String())
+ assert.Equal(t, "panic", PanicLevel.String())
+}
+
+func TestParseLevel(t *testing.T) {
+ l, err := ParseLevel("panic")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("PANIC")
+ assert.Nil(t, err)
+ assert.Equal(t, PanicLevel, l)
+
+ l, err = ParseLevel("fatal")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("FATAL")
+ assert.Nil(t, err)
+ assert.Equal(t, FatalLevel, l)
+
+ l, err = ParseLevel("error")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("ERROR")
+ assert.Nil(t, err)
+ assert.Equal(t, ErrorLevel, l)
+
+ l, err = ParseLevel("warn")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARN")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("warning")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("WARNING")
+ assert.Nil(t, err)
+ assert.Equal(t, WarnLevel, l)
+
+ l, err = ParseLevel("info")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("INFO")
+ assert.Nil(t, err)
+ assert.Equal(t, InfoLevel, l)
+
+ l, err = ParseLevel("debug")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("DEBUG")
+ assert.Nil(t, err)
+ assert.Equal(t, DebugLevel, l)
+
+ l, err = ParseLevel("invalid")
+ assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
+}
+
+func TestGetSetLevelRace(t *testing.T) {
+ wg := sync.WaitGroup{}
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ if i%2 == 0 {
+ SetLevel(InfoLevel)
+ } else {
+ GetLevel()
+ }
+ }(i)
+
+ }
+ wg.Wait()
+}
+
+func TestLoggingRace(t *testing.T) {
+ logger := New()
+
+ var wg sync.WaitGroup
+ wg.Add(100)
+
+ for i := 0; i < 100; i++ {
+ go func() {
+ logger.Info("info")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
+// Compile test
+func TestLogrusInterface(t *testing.T) {
+ var buffer bytes.Buffer
+ fn := func(l FieldLogger) {
+ b := l.WithField("key", "value")
+ b.Debug("Test")
+ }
+ // test logger
+ logger := New()
+ logger.Out = &buffer
+ fn(logger)
+
+ // test Entry
+ e := logger.WithField("another", "value")
+ fn(e)
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/src/github.com/sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 00000000..1960169e
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ return true
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/src/github.com/sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 00000000..5f6be4d3
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_linux.go b/vendor/src/github.com/sirupsen/logrus/terminal_linux.go
new file mode 100644
index 00000000..308160ca
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/src/github.com/sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000..329038f6
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/src/github.com/sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000..a3c6f6e7
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/terminal_windows.go b/vendor/src/github.com/sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000..3727e8ad
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/src/github.com/sirupsen/logrus/text_formatter.go b/vendor/src/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 00000000..076de5da
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
+ case string:
+ if !needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ case error:
+ errmsg := value.Error()
+ if !needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%q", errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+}
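The `TextFormatter` options compose freely. A sketch enabling a full timestamp with a custom layout (the layout string is just an example Go reference-time format):

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp:   true,                  // wall-clock time instead of seconds since start
		TimestampFormat: "2006-01-02 15:04:05", // any Go reference-time layout
		DisableColors:   false,
	})

	log.WithField("animal", "walrus").Warn("the ice is thin")
}
```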
diff --git a/vendor/src/github.com/sirupsen/logrus/text_formatter_test.go b/vendor/src/github.com/sirupsen/logrus/text_formatter_test.go
new file mode 100644
index 00000000..107703fa
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/text_formatter_test.go
@@ -0,0 +1,71 @@
+package logrus
+
+import (
+ "bytes"
+ "errors"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestQuoting(t *testing.T) {
+ tf := &TextFormatter{DisableColors: true}
+
+ checkQuoting := func(q bool, value interface{}) {
+ b, _ := tf.Format(WithField("test", value))
+ idx := bytes.Index(b, ([]byte)("test="))
+ cont := bytes.Contains(b[idx+5:], []byte{'"'})
+ if cont != q {
+ if q {
+ t.Errorf("quoting expected for: %#v", value)
+ } else {
+ t.Errorf("quoting not expected for: %#v", value)
+ }
+ }
+ }
+
+ checkQuoting(false, "abcd")
+ checkQuoting(false, "v1.0")
+ checkQuoting(false, "1234567890")
+ checkQuoting(true, "/foobar")
+ checkQuoting(true, "x y")
+ checkQuoting(true, "x,y")
+ checkQuoting(false, errors.New("invalid"))
+ checkQuoting(true, errors.New("invalid argument"))
+}
+
+func TestTimestampFormat(t *testing.T) {
+ checkTimeStr := func(format string) {
+ customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
+ customStr, _ := customFormatter.Format(WithField("test", "test"))
+ timeStart := bytes.Index(customStr, ([]byte)("time="))
+ timeEnd := bytes.Index(customStr, ([]byte)("level="))
+ timeStr := customStr[timeStart+5 : timeEnd-1]
+ if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
+ timeStr = timeStr[1 : len(timeStr)-1]
+ }
+ if format == "" {
+ format = time.RFC3339
+ }
+ _, e := time.Parse(format, (string)(timeStr))
+ if e != nil {
+ t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
+ }
+ }
+
+ checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
+ checkTimeStr("Mon Jan _2 15:04:05 2006")
+ checkTimeStr("")
+}
+
+func TestDisableTimestampWithColoredOutput(t *testing.T) {
+ tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
+
+ b, _ := tf.Format(WithField("test", "test"))
+ if strings.Contains(string(b), "[0000]") {
+ t.Error("timestamp not expected when DisableTimestamp is true")
+ }
+}
+
+// TODO add tests for sorting etc., this requires a parser for the text
+// formatter output.
diff --git a/vendor/src/github.com/sirupsen/logrus/writer.go b/vendor/src/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 00000000..f74d2aa5
--- /dev/null
+++ b/vendor/src/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+ switch level {
+ case DebugLevel:
+ printFunc = logger.Debug
+ case InfoLevel:
+ printFunc = logger.Info
+ case WarnLevel:
+ printFunc = logger.Warn
+ case ErrorLevel:
+ printFunc = logger.Error
+ case FatalLevel:
+ printFunc = logger.Fatal
+ case PanicLevel:
+ printFunc = logger.Panic
+ default:
+ printFunc = logger.Print
+ }
+
+ go logger.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
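+
+// A minimal usage sketch: WriterLevel returns an io.PipeWriter whose lines
+// are re-logged at the chosen level, which makes it handy for redirecting the
+// standard library logger ("log" here names the standard library package, an
+// assumption of the sketch):
+//
+//	logger := New()
+//	w := logger.WriterLevel(WarnLevel)
+//	defer w.Close()
+//	stdlog := log.New(w, "", 0)
+//	stdlog.Println("this line is emitted at warn level")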
diff --git a/vendor/src/golang.org/x/net/context/context.go b/vendor/src/golang.org/x/net/context/context.go
new file mode 100644
index 00000000..f143ed6a
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/context.go
@@ -0,0 +1,156 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between them must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
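+
+// A minimal usage sketch tying the pieces above together (WithCancel lives in
+// go17.go or pre_go17.go depending on the Go version; worker is a
+// hypothetical function that returns once ctx.Done() is closed):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel() // calling a CancelFunc again after the first call is a no-op
+//	go worker(ctx)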
diff --git a/vendor/src/golang.org/x/net/context/context_test.go b/vendor/src/golang.org/x/net/context/context_test.go
new file mode 100644
index 00000000..62844131
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/context_test.go
@@ -0,0 +1,583 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "fmt"
+ "math/rand"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+// otherContext is a Context that's not one of the types defined in context.go.
+// This lets us test code paths that differ based on the underlying type of the
+// Context.
+type otherContext struct {
+ Context
+}
+
+func TestBackground(t *testing.T) {
+ c := Background()
+ if c == nil {
+ t.Fatalf("Background returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.Background"; got != want {
+ t.Errorf("Background().String() = %q want %q", got, want)
+ }
+}
+
+func TestTODO(t *testing.T) {
+ c := TODO()
+ if c == nil {
+ t.Fatalf("TODO returned nil")
+ }
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if got, want := fmt.Sprint(c), "context.TODO"; got != want {
+ t.Errorf("TODO().String() = %q want %q", got, want)
+ }
+}
+
+func TestWithCancel(t *testing.T) {
+ c1, cancel := WithCancel(Background())
+
+ if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
+ t.Errorf("c1.String() = %q want %q", got, want)
+ }
+
+ o := otherContext{c1}
+ c2, _ := WithCancel(o)
+ contexts := []Context{c1, o, c2}
+
+ for i, c := range contexts {
+ if d := c.Done(); d == nil {
+ t.Errorf("c[%d].Done() == %v want non-nil", i, d)
+ }
+ if e := c.Err(); e != nil {
+ t.Errorf("c[%d].Err() == %v want nil", i, e)
+ }
+
+ select {
+ case x := <-c.Done():
+ t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ }
+
+ cancel()
+ time.Sleep(100 * time.Millisecond) // let cancelation propagate
+
+ for i, c := range contexts {
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
+ }
+ }
+}
+
+func TestParentFinishesChild(t *testing.T) {
+ // Context tree:
+ // parent -> cancelChild
+ // parent -> valueChild -> timerChild
+ parent, cancel := WithCancel(Background())
+ cancelChild, stop := WithCancel(parent)
+ defer stop()
+ valueChild := WithValue(parent, "key", "value")
+ timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
+ defer stop()
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-cancelChild.Done():
+ t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
+ case x := <-timerChild.Done():
+ t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
+ case x := <-valueChild.Done():
+ t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ // The parent's children should contain the two cancelable children.
+ pc := parent.(*cancelCtx)
+ cc := cancelChild.(*cancelCtx)
+ tc := timerChild.(*timerCtx)
+ pc.mu.Lock()
+ if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
+ t.Errorf("bad linkage: pc.children = %v, want %v and %v",
+ pc.children, cc, tc)
+ }
+ pc.mu.Unlock()
+
+ if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+ if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
+ t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
+ }
+
+ cancel()
+
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+
+ // parent and children should all be finished.
+ check := func(ctx Context, name string) {
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
+ }
+ if e := ctx.Err(); e != Canceled {
+ t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
+ }
+ }
+ check(parent, "parent")
+ check(cancelChild, "cancelChild")
+ check(valueChild, "valueChild")
+ check(timerChild, "timerChild")
+
+ // A context derived from an already-canceled parent (here via WithValue) should be canceled.
+ precanceledChild := WithValue(parent, "key", "value")
+ select {
+ case <-precanceledChild.Done():
+ default:
+ t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
+ }
+ if e := precanceledChild.Err(); e != Canceled {
+ t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
+ }
+}
+
+func TestChildFinishesFirst(t *testing.T) {
+ cancelable, stop := WithCancel(Background())
+ defer stop()
+ for _, parent := range []Context{Background(), cancelable} {
+ child, cancel := WithCancel(parent)
+
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ case x := <-child.Done():
+ t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+
+ cc := child.(*cancelCtx)
+ pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
+ if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
+ t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
+ }
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 1 || !pc.children[cc] {
+ t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
+ }
+ pc.mu.Unlock()
+ }
+
+ cancel()
+
+ if pcok {
+ pc.mu.Lock()
+ if len(pc.children) != 0 {
+ t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
+ }
+ pc.mu.Unlock()
+ }
+
+ // child should be finished.
+ select {
+ case <-child.Done():
+ default:
+ t.Errorf("<-child.Done() blocked, but shouldn't have")
+ }
+ if e := child.Err(); e != Canceled {
+ t.Errorf("child.Err() == %v want %v", e, Canceled)
+ }
+
+ // parent should not be finished.
+ select {
+ case x := <-parent.Done():
+ t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
+ default:
+ }
+ if e := parent.Err(); e != nil {
+ t.Errorf("parent.Err() == %v want nil", e)
+ }
+ }
+}
+
+func testDeadline(c Context, wait time.Duration, t *testing.T) {
+ select {
+ case <-time.After(wait):
+ t.Fatalf("context should have timed out")
+ case <-c.Done():
+ }
+ if e := c.Err(); e != DeadlineExceeded {
+ t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
+ }
+}
+
+func TestDeadline(t *testing.T) {
+ t.Parallel()
+ const timeUnit = 500 * time.Millisecond
+ c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit))
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 2*timeUnit, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))
+ o := otherContext{c}
+ testDeadline(o, 2*timeUnit, t)
+
+ c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit))
+ o = otherContext{c}
+ c, _ = WithDeadline(o, time.Now().Add(3*timeUnit))
+ testDeadline(c, 2*timeUnit, t)
+}
+
+func TestTimeout(t *testing.T) {
+ t.Parallel()
+ const timeUnit = 500 * time.Millisecond
+ c, _ := WithTimeout(Background(), 1*timeUnit)
+ if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
+ t.Errorf("c.String() = %q want prefix %q", got, prefix)
+ }
+ testDeadline(c, 2*timeUnit, t)
+
+ c, _ = WithTimeout(Background(), 1*timeUnit)
+ o := otherContext{c}
+ testDeadline(o, 2*timeUnit, t)
+
+ c, _ = WithTimeout(Background(), 1*timeUnit)
+ o = otherContext{c}
+ c, _ = WithTimeout(o, 3*timeUnit)
+ testDeadline(c, 2*timeUnit, t)
+}
+
+func TestCanceledTimeout(t *testing.T) {
+ t.Parallel()
+ const timeUnit = 500 * time.Millisecond
+ c, _ := WithTimeout(Background(), 2*timeUnit)
+ o := otherContext{c}
+ c, cancel := WithTimeout(o, 4*timeUnit)
+ cancel()
+ time.Sleep(1 * timeUnit) // let cancelation propagate
+ select {
+ case <-c.Done():
+ default:
+ t.Errorf("<-c.Done() blocked, but shouldn't have")
+ }
+ if e := c.Err(); e != Canceled {
+ t.Errorf("c.Err() == %v want %v", e, Canceled)
+ }
+}
+
+type key1 int
+type key2 int
+
+var k1 = key1(1)
+var k2 = key2(1) // same int as k1, different type
+var k3 = key2(3) // same type as k2, different int
+
+func TestValues(t *testing.T) {
+ check := func(c Context, nm, v1, v2, v3 string) {
+ if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
+ t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
+ }
+ if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
+ t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
+ }
+ if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
+ t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
+ }
+ }
+
+ c0 := Background()
+ check(c0, "c0", "", "", "")
+
+ c1 := WithValue(Background(), k1, "c1k1")
+ check(c1, "c1", "c1k1", "", "")
+
+ if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
+ t.Errorf("c.String() = %q want %q", got, want)
+ }
+
+ c2 := WithValue(c1, k2, "c2k2")
+ check(c2, "c2", "c1k1", "c2k2", "")
+
+ c3 := WithValue(c2, k3, "c3k3")
+ check(c3, "c2", "c1k1", "c2k2", "c3k3")
+
+ c4 := WithValue(c3, k1, nil)
+ check(c4, "c4", "", "c2k2", "c3k3")
+
+ o0 := otherContext{Background()}
+ check(o0, "o0", "", "", "")
+
+ o1 := otherContext{WithValue(Background(), k1, "c1k1")}
+ check(o1, "o1", "c1k1", "", "")
+
+ o2 := WithValue(o1, k2, "o2k2")
+ check(o2, "o2", "c1k1", "o2k2", "")
+
+ o3 := otherContext{c4}
+ check(o3, "o3", "", "c2k2", "c3k3")
+
+ o4 := WithValue(o3, k3, nil)
+ check(o4, "o4", "", "c2k2", "")
+}
+
+func TestAllocs(t *testing.T) {
+ bg := Background()
+ for _, test := range []struct {
+ desc string
+ f func()
+ limit float64
+ gccgoLimit float64
+ }{
+ {
+ desc: "Background()",
+ f: func() { Background() },
+ limit: 0,
+ gccgoLimit: 0,
+ },
+ {
+ desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
+ f: func() {
+ c := WithValue(bg, k1, nil)
+ c.Value(k1)
+ },
+ limit: 3,
+ gccgoLimit: 3,
+ },
+ {
+ desc: "WithTimeout(bg, 15*time.Millisecond)",
+ f: func() {
+ c, _ := WithTimeout(bg, 15*time.Millisecond)
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 16,
+ },
+ {
+ desc: "WithCancel(bg)",
+ f: func() {
+ c, cancel := WithCancel(bg)
+ cancel()
+ <-c.Done()
+ },
+ limit: 5,
+ gccgoLimit: 8,
+ },
+ {
+ desc: "WithTimeout(bg, 100*time.Millisecond)",
+ f: func() {
+ c, cancel := WithTimeout(bg, 100*time.Millisecond)
+ cancel()
+ <-c.Done()
+ },
+ limit: 8,
+ gccgoLimit: 25,
+ },
+ } {
+ limit := test.limit
+ if runtime.Compiler == "gccgo" {
+ // gccgo does not yet do escape analysis.
+ // TODO(iant): Remove this when gccgo does do escape analysis.
+ limit = test.gccgoLimit
+ }
+ if n := testing.AllocsPerRun(100, test.f); n > limit {
+ t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
+ }
+ }
+}
+
+func TestSimultaneousCancels(t *testing.T) {
+ root, cancel := WithCancel(Background())
+ m := map[Context]CancelFunc{root: cancel}
+ q := []Context{root}
+ // Create a tree of contexts.
+ for len(q) != 0 && len(m) < 100 {
+ parent := q[0]
+ q = q[1:]
+ for i := 0; i < 4; i++ {
+ ctx, cancel := WithCancel(parent)
+ m[ctx] = cancel
+ q = append(q, ctx)
+ }
+ }
+ // Start all the cancels in a random order.
+ var wg sync.WaitGroup
+ wg.Add(len(m))
+ for _, cancel := range m {
+ go func(cancel CancelFunc) {
+ cancel()
+ wg.Done()
+ }(cancel)
+ }
+ // Wait on all the contexts in a random order.
+ for ctx := range m {
+ select {
+ case <-ctx.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
+ }
+ }
+ // Wait for all the cancel functions to return.
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+ select {
+ case <-done:
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
+ }
+}
+
+func TestInterlockedCancels(t *testing.T) {
+ parent, cancelParent := WithCancel(Background())
+ child, cancelChild := WithCancel(parent)
+ go func() {
+ <-parent.Done() // wait for the parent to be canceled first
+ cancelChild()
+ }()
+ cancelParent()
+ select {
+ case <-child.Done():
+ case <-time.After(1 * time.Second):
+ buf := make([]byte, 10<<10)
+ n := runtime.Stack(buf, true)
+ t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
+ }
+}
+
+func TestLayersCancel(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), false)
+}
+
+func TestLayersTimeout(t *testing.T) {
+ testLayers(t, time.Now().UnixNano(), true)
+}
+
+func testLayers(t *testing.T, seed int64, testTimeout bool) {
+ rand.Seed(seed)
+ errorf := func(format string, a ...interface{}) {
+ t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
+ }
+ const (
+ timeout = 200 * time.Millisecond
+ minLayers = 30
+ )
+ type value int
+ var (
+ vals []*value
+ cancels []CancelFunc
+ numTimers int
+ ctx = Background()
+ )
+ for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
+ switch rand.Intn(3) {
+ case 0:
+ v := new(value)
+ ctx = WithValue(ctx, v, v)
+ vals = append(vals, v)
+ case 1:
+ var cancel CancelFunc
+ ctx, cancel = WithCancel(ctx)
+ cancels = append(cancels, cancel)
+ case 2:
+ var cancel CancelFunc
+ ctx, cancel = WithTimeout(ctx, timeout)
+ cancels = append(cancels, cancel)
+ numTimers++
+ }
+ }
+ checkValues := func(when string) {
+ for _, key := range vals {
+ if val := ctx.Value(key).(*value); key != val {
+ errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
+ }
+ }
+ }
+ select {
+ case <-ctx.Done():
+ errorf("ctx should not be canceled yet")
+ default:
+ }
+ if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
+ t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
+ }
+ t.Log(ctx)
+ checkValues("before cancel")
+ if testTimeout {
+ select {
+ case <-ctx.Done():
+ case <-time.After(timeout + 100*time.Millisecond):
+ errorf("ctx should have timed out")
+ }
+ checkValues("after timeout")
+ } else {
+ cancel := cancels[rand.Intn(len(cancels))]
+ cancel()
+ select {
+ case <-ctx.Done():
+ default:
+ errorf("ctx should be canceled")
+ }
+ checkValues("after cancel")
+ }
+}
+
+func TestCancelRemoves(t *testing.T) {
+ checkChildren := func(when string, ctx Context, want int) {
+ if got := len(ctx.(*cancelCtx).children); got != want {
+ t.Errorf("%s: context has %d children, want %d", when, got, want)
+ }
+ }
+
+ ctx, _ := WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel := WithCancel(ctx)
+ checkChildren("with WithCancel child ", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithCancel child", ctx, 0)
+
+ ctx, _ = WithCancel(Background())
+ checkChildren("after creation", ctx, 0)
+ _, cancel = WithTimeout(ctx, 60*time.Minute)
+ checkChildren("with WithTimeout child ", ctx, 1)
+ cancel()
+ checkChildren("after cancelling WithTimeout child", ctx, 0)
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000..606cf1f9
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+ resp, err := client.Do(req.WithContext(ctx))
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ default:
+ }
+ }
+ return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
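+
+// A minimal usage sketch, assuming a reachable URL; the deadline travels in
+// the context, and a nil client falls back to http.DefaultClient:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
+//	if err != nil {
+//		return err // typically ctx.Err() when the context expired first
+//	}
+//	defer resp.Body.Close()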
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
new file mode 100644
index 00000000..9f0f90f1
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_17_test.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,go1.7
+
+package ctxhttp
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "context"
+)
+
+func TestGo17Context(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ }))
+ ctx := context.Background()
+ resp, err := Get(ctx, http.DefaultClient, ts.URL)
+ if resp == nil || err != nil {
+ t.Fatalf("error received from client: %v %v", err, resp)
+ }
+ resp.Body.Close()
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
new file mode 100644
index 00000000..926870cc
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+ testHookContextDoneBeforeHeaders = nop
+ testHookDoReturned = nop
+ testHookDidBodyClose = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // TODO(djd): Respect any existing value of req.Cancel.
+ cancel := make(chan struct{})
+ req.Cancel = cancel
+
+ type responseAndError struct {
+ resp *http.Response
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ // Make local copies of test hooks closed over by goroutines below.
+ // Prevents data races in tests.
+ testHookDoReturned := testHookDoReturned
+ testHookDidBodyClose := testHookDidBodyClose
+
+ go func() {
+ resp, err := client.Do(req)
+ testHookDoReturned()
+ result <- responseAndError{resp, err}
+ }()
+
+ var resp *http.Response
+
+ select {
+ case <-ctx.Done():
+ testHookContextDoneBeforeHeaders()
+ close(cancel)
+ // Clean up after the goroutine calling client.Do:
+ go func() {
+ if r := <-result; r.resp != nil {
+ testHookDidBodyClose()
+ r.resp.Body.Close()
+ }
+ }()
+ return nil, ctx.Err()
+ case r := <-result:
+ var err error
+ resp, err = r.resp, r.err
+ if err != nil {
+ return resp, err
+ }
+ }
+
+ c := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ close(cancel)
+ case <-c:
+ // The response's Body is closed.
+ }
+ }()
+ resp.Body = &notifyingReader{resp.Body, c}
+
+ return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+ req, err := http.NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+ req, err := http.NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+ return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+ io.ReadCloser
+ notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+ n, err := r.ReadCloser.Read(p)
+ if err != nil && r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return n, err
+}
+
+func (r *notifyingReader) Close() error {
+ err := r.ReadCloser.Close()
+ if r.notify != nil {
+ close(r.notify)
+ r.notify = nil
+ }
+ return err
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
new file mode 100644
index 00000000..9159cf02
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_pre17_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!go1.7
+
+package ctxhttp
+
+import (
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "sync"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// golang.org/issue/14065
+func TestClosesResponseBodyOnCancel(t *testing.T) {
+ defer func() { testHookContextDoneBeforeHeaders = nop }()
+ defer func() { testHookDoReturned = nop }()
+ defer func() { testHookDidBodyClose = nop }()
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
+ defer ts.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // closed when Do enters select case <-ctx.Done()
+ enteredDonePath := make(chan struct{})
+
+ testHookContextDoneBeforeHeaders = func() {
+ close(enteredDonePath)
+ }
+
+ testHookDoReturned = func() {
+ // We now have the result (the Flush'd headers) at least,
+ // so we can cancel the request.
+ cancel()
+
+ // But block the client.Do goroutine from sending
+ // until Do enters into the <-ctx.Done() path, since
+ // otherwise if both channels are readable, select
+ // picks a random one.
+ <-enteredDonePath
+ }
+
+ sawBodyClose := make(chan struct{})
+ testHookDidBodyClose = func() { close(sawBodyClose) }
+
+ tr := &http.Transport{}
+ defer tr.CloseIdleConnections()
+ c := &http.Client{Transport: tr}
+ req, _ := http.NewRequest("GET", ts.URL, nil)
+ _, doErr := Do(ctx, c, req)
+
+ select {
+ case <-sawBodyClose:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for body to close")
+ }
+
+ if doErr != ctx.Err() {
+ t.Errorf("Do error = %v; want %v", doErr, ctx.Err())
+ }
+}
+
+type noteCloseConn struct {
+ net.Conn
+ onceClose sync.Once
+ closefn func()
+}
+
+func (c *noteCloseConn) Close() error {
+ c.onceClose.Do(c.closefn)
+ return c.Conn.Close()
+}
diff --git a/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
new file mode 100644
index 00000000..1e415518
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package ctxhttp
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ requestDuration = 100 * time.Millisecond
+ requestBody = "ok"
+)
+
+func okHandler(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(requestDuration)
+ io.WriteString(w, requestBody)
+}
+
+func TestNoTimeout(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(okHandler))
+ defer ts.Close()
+
+ ctx := context.Background()
+ res, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ slurp, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(slurp) != requestBody {
+ t.Errorf("body = %q; want %q", slurp, requestBody)
+ }
+}
+
+func TestCancelBeforeHeaders(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ blockServer := make(chan struct{})
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ cancel()
+ <-blockServer
+ io.WriteString(w, requestBody)
+ }))
+ defer ts.Close()
+ defer close(blockServer)
+
+ res, err := Get(ctx, nil, ts.URL)
+ if err == nil {
+ res.Body.Close()
+ t.Fatal("Get returned unexpected nil error")
+ }
+ if err != context.Canceled {
+ t.Errorf("err = %v; want %v", err, context.Canceled)
+ }
+}
+
+func TestCancelAfterHangingRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+ <-w.(http.CloseNotifier).CloseNotify()
+ }))
+ defer ts.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ resp, err := Get(ctx, nil, ts.URL)
+ if err != nil {
+ t.Fatalf("unexpected error in Get: %v", err)
+ }
+
+ // Cancel before reading the body.
+ // Reading Request.Body should fail, since the request was
+ // canceled before anything was written.
+ cancel()
+
+ done := make(chan struct{})
+
+ go func() {
+ b, err := ioutil.ReadAll(resp.Body)
+ if len(b) != 0 || err == nil {
+ t.Errorf(`Read got (%q, %v); want ("", error)`, b, err)
+ }
+ close(done)
+ }()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("Test timed out")
+ case <-done:
+ }
+}
diff --git a/vendor/src/golang.org/x/net/context/go17.go b/vendor/src/golang.org/x/net/context/go17.go
new file mode 100644
index 00000000..d20f52b7
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
diff --git a/vendor/src/golang.org/x/net/context/pre_go17.go b/vendor/src/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 00000000..0f35592d
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/vendor/src/golang.org/x/net/context/withtimeout_test.go b/vendor/src/golang.org/x/net/context/withtimeout_test.go
new file mode 100644
index 00000000..a6754dc3
--- /dev/null
+++ b/vendor/src/golang.org/x/net/context/withtimeout_test.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context_test
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+func ExampleWithTimeout() {
+ // Pass a context with a timeout to tell a blocking function that it
+ // should abandon its work after the timeout elapses.
+ ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ select {
+ case <-time.After(200 * time.Millisecond):
+ fmt.Println("overslept")
+ case <-ctx.Done():
+ fmt.Println(ctx.Err()) // prints "context deadline exceeded"
+ }
+ // Output:
+ // context deadline exceeded
+}
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/LICENSE b/vendor/src/gopkg.in/airbrake/gobrake.v2/LICENSE
new file mode 100644
index 00000000..d64c10ef
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014 The Gobrake Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/Makefile b/vendor/src/gopkg.in/airbrake/gobrake.v2/Makefile
new file mode 100644
index 00000000..161c4fd0
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/Makefile
@@ -0,0 +1,4 @@
+all:
+ go test ./...
+ go test ./... -short -race
+ go vet
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/README.md b/vendor/src/gopkg.in/airbrake/gobrake.v2/README.md
new file mode 100644
index 00000000..05e5739a
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/README.md
@@ -0,0 +1,47 @@
+# Airbrake Golang Notifier [![Build Status](https://travis-ci.org/airbrake/gobrake.svg?branch=v2)](https://travis-ci.org/airbrake/gobrake)
+
+# Example
+
+```go
+package main
+
+import (
+ "errors"
+
+ "gopkg.in/airbrake/gobrake.v2"
+)
+
+var airbrake = gobrake.NewNotifier(1234567, "FIXME")
+
+func init() {
+ airbrake.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
+ notice.Context["environment"] = "production"
+ return notice
+ })
+}
+
+func main() {
+ defer airbrake.Close()
+ defer airbrake.NotifyOnPanic()
+
+ airbrake.Notify(errors.New("operation failed"), nil)
+}
+```
+
+## Ignoring notices
+
+```go
+airbrake.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
+ if notice.Context["environment"] == "development" {
+ // Ignore notices in development environment.
+ return nil
+ }
+ return notice
+})
+```
+
+## Logging
+
+You can use the [glog fork](https://github.com/airbrake/glog) to send your logs to Airbrake.
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/bench_test.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/bench_test.go
new file mode 100644
index 00000000..01657893
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/bench_test.go
@@ -0,0 +1,37 @@
+package gobrake_test
+
+import (
+ "errors"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "gopkg.in/airbrake/gobrake.v2"
+)
+
+func BenchmarkSendNotice(b *testing.B) {
+ handler := func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ w.Write([]byte(`{"id":"123"}`))
+ }
+ server := httptest.NewServer(http.HandlerFunc(handler))
+
+ notifier := gobrake.NewNotifier(1, "key")
+ notifier.SetHost(server.URL)
+
+ notice := notifier.Notice(errors.New("benchmark"), nil, 0)
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ id, err := notifier.SendNotice(notice)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if id != "123" {
+ b.Fatalf("got %q, wanted 123", id)
+ }
+ }
+ })
+}
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/gobrake.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/gobrake.go
new file mode 100644
index 00000000..23efe414
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/gobrake.go
@@ -0,0 +1,16 @@
+package gobrake
+
+import (
+ "log"
+ "os"
+)
+
+var logger *log.Logger
+
+func init() {
+ SetLogger(log.New(os.Stderr, "gobrake: ", log.LstdFlags))
+}
+
+func SetLogger(l *log.Logger) {
+ logger = l
+}
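+
+// A minimal usage sketch: SetLogger redirects gobrake's own diagnostics,
+// e.g. to silence them in tests:
+//
+//	gobrake.SetLogger(log.New(ioutil.Discard, "", 0))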
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/notice.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/notice.go
new file mode 100644
index 00000000..06bc771a
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/notice.go
@@ -0,0 +1,105 @@
+package gobrake
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+var defaultContext map[string]interface{}
+
+func getDefaultContext() map[string]interface{} {
+ if defaultContext != nil {
+ return defaultContext
+ }
+
+ defaultContext = map[string]interface{}{
+ "notifier": map[string]interface{}{
+ "name": "gobrake",
+ "version": "2.0.4",
+ "url": "https://github.com/airbrake/gobrake",
+ },
+
+ "language": runtime.Version(),
+ "os": runtime.GOOS,
+ "architecture": runtime.GOARCH,
+ }
+ if s, err := os.Hostname(); err == nil {
+ defaultContext["hostname"] = s
+ }
+ if s := os.Getenv("GOPATH"); s != "" {
+ list := filepath.SplitList(s)
+ // TODO: multiple root dirs?
+ defaultContext["rootDirectory"] = list[0]
+ }
+ return defaultContext
+}
+
+type Error struct {
+ Type string `json:"type"`
+ Message string `json:"message"`
+ Backtrace []StackFrame `json:"backtrace"`
+}
+
+type Notice struct {
+ Errors []Error `json:"errors"`
+ Context map[string]interface{} `json:"context"`
+ Env map[string]interface{} `json:"environment"`
+ Session map[string]interface{} `json:"session"`
+ Params map[string]interface{} `json:"params"`
+}
+
+func (n *Notice) String() string {
+ if len(n.Errors) == 0 {
+ return "Notice"
+ }
+ e := n.Errors[0]
+ return fmt.Sprintf("Notice<%s: %s>", e.Type, e.Message)
+}
+
+func NewNotice(e interface{}, req *http.Request, depth int) *Notice {
+ notice := &Notice{
+ Errors: []Error{{
+ Type: fmt.Sprintf("%T", e),
+ Message: fmt.Sprint(e),
+ Backtrace: stack(depth),
+ }},
+ Context: map[string]interface{}{},
+ Env: map[string]interface{}{},
+ Session: map[string]interface{}{},
+ Params: map[string]interface{}{},
+ }
+
+ for k, v := range getDefaultContext() {
+ notice.Context[k] = v
+ }
+
+ if req != nil {
+ notice.Context["url"] = req.URL.String()
+ if ua := req.Header.Get("User-Agent"); ua != "" {
+ notice.Context["userAgent"] = ua
+ }
+
+ for k, v := range req.Header {
+ if len(v) == 1 {
+ notice.Env[k] = v[0]
+ } else {
+ notice.Env[k] = v
+ }
+ }
+
+ if err := req.ParseForm(); err == nil {
+ for k, v := range req.Form {
+ if len(v) == 1 {
+ notice.Params[k] = v[0]
+ } else {
+ notice.Params[k] = v
+ }
+ }
+ }
+ }
+
+ return notice
+}
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier.go
new file mode 100644
index 00000000..e409321f
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier.go
@@ -0,0 +1,280 @@
+package gobrake // import "gopkg.in/airbrake/gobrake.v2"
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+)
+
+const defaultAirbrakeHost = "https://airbrake.io"
+const waitTimeout = 5 * time.Second
+const httpStatusTooManyRequests = 429
+
+var (
+ errClosed = errors.New("gobrake: notifier is closed")
+ errRateLimited = errors.New("gobrake: rate limited")
+)
+
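+// httpClient is tuned for short calls to the Airbrake API: bounded dial,
+// TLS handshake and response header timeouts, and a TLS session cache shared
+// across notices.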
+var httpClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 15 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: &tls.Config{
+ ClientSessionCache: tls.NewLRUClientSessionCache(1024),
+ },
+ MaxIdleConnsPerHost: 10,
+ ResponseHeaderTimeout: 10 * time.Second,
+ },
+ Timeout: 10 * time.Second,
+}
+
+var buffers = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
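+// filter can modify a notice before it is sent, or drop it by returning nil.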
+type filter func(*Notice) *Notice
+
+type Notifier struct {
+ // Client is the http.Client used to interact with the Airbrake API.
+ Client *http.Client
+
+ projectId int64
+ projectKey string
+ createNoticeURL string
+
+ filters []filter
+
+ wg sync.WaitGroup
+ noticeCh chan *Notice
+ closed chan struct{}
+}
+
+func NewNotifier(projectId int64, projectKey string) *Notifier {
+ n := &Notifier{
+ Client: httpClient,
+
+ projectId: projectId,
+ projectKey: projectKey,
+ createNoticeURL: getCreateNoticeURL(defaultAirbrakeHost, projectId, projectKey),
+
+ filters: []filter{noticeBacktraceFilter},
+
+ noticeCh: make(chan *Notice, 1000),
+ closed: make(chan struct{}),
+ }
+ for i := 0; i < 10; i++ {
+ go n.worker()
+ }
+ return n
+}
+
+// SetHost sets the Airbrake host name. Default is https://airbrake.io.
+func (n *Notifier) SetHost(h string) {
+ n.createNoticeURL = getCreateNoticeURL(h, n.projectId, n.projectKey)
+}
+
+// AddFilter adds a filter that can modify or ignore the notice.
+func (n *Notifier) AddFilter(fn filter) {
+ n.filters = append(n.filters, fn)
+}
+
+// Notify notifies Airbrake about the error.
+func (n *Notifier) Notify(e interface{}, req *http.Request) {
+ notice := n.Notice(e, req, 1)
+ n.SendNoticeAsync(notice)
+}
+
+// Notice returns an Airbrake notice created from the error and request. depth
+// determines which call frame to use when constructing the backtrace.
+func (n *Notifier) Notice(err interface{}, req *http.Request, depth int) *Notice {
+ return NewNotice(err, req, depth+3)
+}
+
+type sendResponse struct {
+ Id string `json:"id"`
+}
+
+// SendNotice sends notice to Airbrake.
+func (n *Notifier) SendNotice(notice *Notice) (string, error) {
+ for _, fn := range n.filters {
+ notice = fn(notice)
+ if notice == nil {
+ // Notice is ignored.
+ return "", nil
+ }
+ }
+
+ buf := buffers.Get().(*bytes.Buffer)
+ defer buffers.Put(buf)
+
+ buf.Reset()
+ if err := json.NewEncoder(buf).Encode(notice); err != nil {
+ return "", err
+ }
+
+ resp, err := n.Client.Post(n.createNoticeURL, "application/json", buf)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ buf.Reset()
+ _, err = buf.ReadFrom(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ if resp.StatusCode != http.StatusCreated {
+ if resp.StatusCode == httpStatusTooManyRequests {
+ return "", errRateLimited
+ }
+ err := fmt.Errorf("gobrake: got response status=%q, wanted 201 CREATED", resp.Status)
+ return "", err
+ }
+
+ var sendResp sendResponse
+ err = json.NewDecoder(buf).Decode(&sendResp)
+ if err != nil {
+ return "", err
+ }
+
+ return sendResp.Id, nil
+}
+
+func (n *Notifier) sendNotice(notice *Notice) {
+ if _, err := n.SendNotice(notice); err != nil && err != errRateLimited {
+ logger.Printf("gobrake failed reporting notice=%q: %s", notice, err)
+ }
+ n.wg.Done()
+}
+
+// SendNoticeAsync acts as SendNotice, but sends the notice asynchronously;
+// pending notices can be flushed with Flush.
+func (n *Notifier) SendNoticeAsync(notice *Notice) {
+ select {
+ case <-n.closed:
+ return
+ default:
+ }
+
+ n.wg.Add(1)
+ select {
+ case n.noticeCh <- notice:
+ default:
+ n.wg.Done()
+ logger.Printf(
+ "notice=%q is ignored, because queue is full (len=%d)",
+ notice, len(n.noticeCh),
+ )
+ }
+}
+
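+// worker sends notices from noticeCh until the notifier is closed and the
+// queue has been drained.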
+func (n *Notifier) worker() {
+ for {
+ select {
+ case notice := <-n.noticeCh:
+ n.sendNotice(notice)
+ case <-n.closed:
+ select {
+ case notice := <-n.noticeCh:
+ n.sendNotice(notice)
+ default:
+ return
+ }
+ }
+ }
+}
+
+// NotifyOnPanic notifies Airbrake about a panic and then re-panics. It
+// should be used with a defer statement:
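+//
+// defer notifier.NotifyOnPanic()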
+func (n *Notifier) NotifyOnPanic() {
+ if v := recover(); v != nil {
+ notice := n.Notice(v, nil, 3)
+ n.SendNotice(notice)
+ panic(v)
+ }
+}
+
+// Flush waits for pending requests to finish.
+func (n *Notifier) Flush() {
+ n.waitTimeout(waitTimeout)
+}
+
+// Deprecated: Use CloseTimeout instead.
+func (n *Notifier) WaitAndClose(timeout time.Duration) error {
+ return n.CloseTimeout(timeout)
+}
+
+// CloseTimeout waits for pending requests to finish and then closes the notifier.
+func (n *Notifier) CloseTimeout(timeout time.Duration) error {
+ select {
+ case <-n.closed:
+ default:
+ close(n.closed)
+ }
+ return n.waitTimeout(timeout)
+}
+
+func (n *Notifier) waitTimeout(timeout time.Duration) error {
+ done := make(chan struct{})
+ go func() {
+ n.wg.Wait()
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ return nil
+ case <-time.After(timeout):
+ return fmt.Errorf("Wait timed out after %s", timeout)
+ }
+}
+
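+// Close is CloseTimeout with the default timeout.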
+func (n *Notifier) Close() error {
+ return n.CloseTimeout(waitTimeout)
+}
+
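+// getCreateNoticeURL builds the Airbrake v3 create-notice endpoint URL for
+// the given host, project id and key.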
+func getCreateNoticeURL(host string, projectId int64, key string) string {
+ return fmt.Sprintf(
+ "%s/api/v3/projects/%d/notices?key=%s",
+ host, projectId, key,
+ )
+}
+
+func noticeBacktraceFilter(notice *Notice) *Notice {
+ v, ok := notice.Context["rootDirectory"]
+ if !ok {
+ return notice
+ }
+
+ dir, ok := v.(string)
+ if !ok {
+ return notice
+ }
+
+ dir = filepath.Join(dir, "src")
+ for i := range notice.Errors {
+ replaceRootDirectory(notice.Errors[i].Backtrace, dir)
+ }
+ return notice
+}
+
+func replaceRootDirectory(backtrace []StackFrame, rootDir string) {
+ for i := range backtrace {
+ backtrace[i].File = strings.Replace(backtrace[i].File, rootDir, "[PROJECT_ROOT]", 1)
+ }
+}
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier_test.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier_test.go
new file mode 100644
index 00000000..a2b3030b
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/notifier_test.go
@@ -0,0 +1,140 @@
+package gobrake_test
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "gopkg.in/airbrake/gobrake.v2"
+)
+
+func TestGobrake(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "gobrake")
+}
+
+var _ = Describe("Notifier", func() {
+ var notifier *gobrake.Notifier
+ var sentNotice *gobrake.Notice
+
+ notify := func(e interface{}, req *http.Request) {
+ notifier.Notify(e, req)
+ notifier.Flush()
+ }
+
+ BeforeEach(func() {
+ handler := func(w http.ResponseWriter, req *http.Request) {
+ b, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ sentNotice = new(gobrake.Notice)
+ err = json.Unmarshal(b, sentNotice)
+ Expect(err).To(BeNil())
+
+ w.WriteHeader(http.StatusCreated)
+ w.Write([]byte(`{"id":"123"}`))
+ }
+ server := httptest.NewServer(http.HandlerFunc(handler))
+
+ notifier = gobrake.NewNotifier(1, "key")
+ notifier.SetHost(server.URL)
+ })
+
+ AfterEach(func() {
+ Expect(notifier.Close()).NotTo(HaveOccurred())
+ })
+
+ It("reports error and backtrace", func() {
+ notify("hello", nil)
+
+ e := sentNotice.Errors[0]
+ Expect(e.Type).To(Equal("string"))
+ Expect(e.Message).To(Equal("hello"))
+ Expect(e.Backtrace[0].File).To(Equal("[PROJECT_ROOT]/gopkg.in/airbrake/gobrake.v2/notifier_test.go"))
+ })
+
+ It("reports context, env, session and params", func() {
+ wanted := notifier.Notice("hello", nil, 3)
+ wanted.Context["context1"] = "context1"
+ wanted.Env["env1"] = "value1"
+ wanted.Session["session1"] = "value1"
+ wanted.Params["param1"] = "value1"
+
+ id, err := notifier.SendNotice(wanted)
+ Expect(err).To(BeNil())
+ Expect(id).To(Equal("123"))
+
+ Expect(sentNotice).To(Equal(wanted))
+ })
+
+ It("reports context using SetContext", func() {
+ notifier.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
+ notice.Context["environment"] = "production"
+ return notice
+ })
+ notify("hello", nil)
+
+ Expect(sentNotice.Context["environment"]).To(Equal("production"))
+ })
+
+ It("reports request", func() {
+ u, err := url.Parse("http://foo/bar")
+ Expect(err).To(BeNil())
+
+ req := &http.Request{
+ URL: u,
+ Header: http.Header{
+ "h1": {"h1v1", "h1v2"},
+ "h2": {"h2v1"},
+ "User-Agent": {"my_user_agent"},
+ },
+ Form: url.Values{
+ "f1": {"f1v1"},
+ "f2": {"f2v1", "f2v2"},
+ },
+ }
+
+ notify("hello", req)
+
+ ctx := sentNotice.Context
+ Expect(ctx["url"]).To(Equal("http://foo/bar"))
+ Expect(ctx["userAgent"]).To(Equal("my_user_agent"))
+
+ params := sentNotice.Params
+ Expect(params["f1"]).To(Equal("f1v1"))
+ Expect(params["f2"]).To(Equal([]interface{}{"f2v1", "f2v2"}))
+
+ env := sentNotice.Env
+ Expect(env["h1"]).To(Equal([]interface{}{"h1v1", "h1v2"}))
+ Expect(env["h2"]).To(Equal("h2v1"))
+ })
+
+ It("collects and reports some context", func() {
+ notify("hello", nil)
+
+ hostname, _ := os.Hostname()
+ gopath := os.Getenv("GOPATH")
+ gopath = filepath.SplitList(gopath)[0]
+
+ Expect(sentNotice.Context["language"]).To(Equal(runtime.Version()))
+ Expect(sentNotice.Context["os"]).To(Equal(runtime.GOOS))
+ Expect(sentNotice.Context["architecture"]).To(Equal(runtime.GOARCH))
+ Expect(sentNotice.Context["hostname"]).To(Equal(hostname))
+ Expect(sentNotice.Context["rootDirectory"]).To(Equal(gopath))
+ })
+
+ It("does not panic on double close", func() {
+ Expect(notifier.Close()).NotTo(HaveOccurred())
+ })
+})
diff --git a/vendor/src/gopkg.in/airbrake/gobrake.v2/util.go b/vendor/src/gopkg.in/airbrake/gobrake.v2/util.go
new file mode 100644
index 00000000..c05c0331
--- /dev/null
+++ b/vendor/src/gopkg.in/airbrake/gobrake.v2/util.go
@@ -0,0 +1,59 @@
+package gobrake
+
+import (
+ "runtime"
+ "strings"
+)
+
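+// stackFilter reports whether a frame should reset the backtrace collected
+// so far; it matches the runtime.panic frame inserted by the Go runtime.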
+func stackFilter(packageName, funcName string, file string, line int) bool {
+ return packageName == "runtime" && funcName == "panic"
+}
+
+type StackFrame struct {
+ File string `json:"file"`
+ Line int `json:"line"`
+ Func string `json:"function"`
+}
+
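+// stack collects stack frames starting depth frames above this call. When it
+// crosses a runtime.panic frame it discards what it has gathered, so a
+// backtrace captured during recover starts at the code that panicked.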
+func stack(depth int) []StackFrame {
+ stack := []StackFrame{}
+ for i := depth; ; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ packageName, funcName := packageFuncName(pc)
+ if stackFilter(packageName, funcName, file, line) {
+ stack = stack[:0]
+ continue
+ }
+ stack = append(stack, StackFrame{
+ File: file,
+ Line: line,
+ Func: funcName,
+ })
+ }
+
+ return stack
+}
+
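+// packageFuncName splits the qualified name reported by runtime.FuncForPC
+// into its package path and bare function name.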
+func packageFuncName(pc uintptr) (string, string) {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "", ""
+ }
+
+ packageName := ""
+ funcName := f.Name()
+
+ if ind := strings.LastIndex(funcName, "/"); ind > 0 {
+ packageName += funcName[:ind+1]
+ funcName = funcName[ind+1:]
+ }
+ if ind := strings.Index(funcName, "."); ind > 0 {
+ packageName += funcName[:ind]
+ funcName = funcName[ind+1:]
+ }
+
+ return packageName, funcName
+}
diff --git a/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/CHANGELOG.md b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/CHANGELOG.md
new file mode 100644
index 00000000..b793a4f4
--- /dev/null
+++ b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/CHANGELOG.md
@@ -0,0 +1,14 @@
+# logrus-airbrake-hook Changelog
+
+v2.1.1 - 2016-09-22
+
+* Fix request deletion from the log entry
+
+v2.1.0 - 2016-09-22
+
+* Support `*http.Request` error reporting
+
+v2.0.0 - 2015-10-05
+
+* Support gobrake v2 api
+
diff --git a/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/LICENSE b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/LICENSE
new file mode 100644
index 00000000..a4282b2a
--- /dev/null
+++ b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Gemnasium
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/README.md b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/README.md
new file mode 100644
index 00000000..d61a2062
--- /dev/null
+++ b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/README.md
@@ -0,0 +1,55 @@
+# Airbrake Hook for Logrus [![Build Status](https://travis-ci.org/gemnasium/logrus-airbrake-hook.svg?branch=master)](https://travis-ci.org/gemnasium/logrus-airbrake-hook) [![godoc reference](https://godoc.org/github.com/gemnasium/logrus-airbrake-hook?status.png)](https://godoc.org/gopkg.in/gemnasium/logrus-airbrake-hook.v2)
+
+Use this hook to send your errors to [Airbrake](https://airbrake.io/).
+This hook uses the [official Airbrake Go package](https://github.com/airbrake/gobrake) and targets the v3 API.
+The hook is synchronous and sends the error for the `log.Error`, `log.Fatal` and `log.Panic` levels.
+
+All logrus fields will be sent as context fields on Airbrake.
+
+## Usage
+
+The hook must be configured with:
+
+* A project ID (found in your Airbrake project settings)
+* An API key (found in your Airbrake project settings)
+* The name of the current environment ("development", "staging", "production", ...)
+
+```go
+import (
+ "log/syslog"
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ )
+
+func main() {
+ log := logrus.New()
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+ log.Error("some logging message")
+}
+```
+
+Note that if environment == "development", the hook will not send anything to Airbrake.
+
+### Reporting HTTP request failures
+
+```go
+import (
+ "net/http"
+
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ )
+
+func main() {
+ log := logrus.New()
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.WithField("request", req).Error("some logging message")
+}
+```
+
+Notes:
+
+* the request will be removed from the log entry before the remaining fields are sent
+* the name of the field doesn't matter, since it's not logged
+* if more than one request is sent, only the first one is used (the others are logged as strings)
diff --git a/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake.go b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake.go
new file mode 100644
index 00000000..a92e01ed
--- /dev/null
+++ b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake.go
@@ -0,0 +1,71 @@
+package airbrake // import "gopkg.in/gemnasium/logrus-airbrake-hook.v2"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/airbrake/gobrake.v2"
+)
+
+// airbrakeHook sends exceptions to an exception-tracking service compatible
+// with the Airbrake API.
+type airbrakeHook struct {
+ Airbrake *gobrake.Notifier
+}
+
+func NewHook(projectID int64, apiKey, env string) *airbrakeHook {
+ airbrake := gobrake.NewNotifier(projectID, apiKey)
+ airbrake.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
+ if env == "development" {
+ return nil
+ }
+ notice.Context["environment"] = env
+ return notice
+ })
+ hook := &airbrakeHook{
+ Airbrake: airbrake,
+ }
+ return hook
+}
+
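+// Fire implements the logrus.Hook interface. It reports the entry to
+// Airbrake synchronously, using the "error" field as the error when present,
+// extracting the first *http.Request found in the fields, and copying the
+// remaining fields into the notice context as strings.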
+func (hook *airbrakeHook) Fire(entry *logrus.Entry) error {
+ var notifyErr error
+ err, ok := entry.Data["error"].(error)
+ if ok {
+ notifyErr = err
+ } else {
+ notifyErr = errors.New(entry.Message)
+ }
+ var req *http.Request
+ for k, v := range entry.Data {
+ if r, ok := v.(*http.Request); ok {
+ req = r
+ delete(entry.Data, k)
+ break
+ }
+ }
+ notice := hook.Airbrake.Notice(notifyErr, req, 3)
+ for k, v := range entry.Data {
+ notice.Context[k] = fmt.Sprintf("%s", v)
+ }
+
+ hook.sendNotice(notice)
+ return nil
+}
+
+func (hook *airbrakeHook) sendNotice(notice *gobrake.Notice) {
+ if _, err := hook.Airbrake.SendNotice(notice); err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to send error to Airbrake: %v\n", err)
+ }
+}
+
+func (hook *airbrakeHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.FatalLevel,
+ logrus.PanicLevel,
+ }
+}
diff --git a/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake_test.go b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake_test.go
new file mode 100644
index 00000000..951d4d9c
--- /dev/null
+++ b/vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/airbrake_test.go
@@ -0,0 +1,203 @@
+package airbrake
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "gopkg.in/airbrake/gobrake.v2"
+)
+
+type customErr struct {
+ msg string
+}
+
+func (e *customErr) Error() string {
+ return e.msg
+}
+
+const (
+ testAPIKey = "abcxyz"
+ testEnv = "development"
+ expectedClass = "*airbrake.customErr"
+ expectedMsg = "foo"
+ unintendedMsg = "Airbrake will not see this string"
+)
+
+var (
+ noticeChan = make(chan *gobrake.Notice, 1)
+)
+
+// TestLogEntryMessageReceived checks that invoking Logrus' log.Error
+// method causes a JSON payload containing the log entry message to be
+// received by an HTTP server emulating an Airbrake-compatible endpoint.
+func TestLogEntryMessageReceived(t *testing.T) {
+ log := logrus.New()
+ hook := newTestHook()
+ log.Hooks.Add(hook)
+
+ log.Error(expectedMsg)
+
+ select {
+ case received := <-noticeChan:
+ receivedErr := received.Errors[0]
+ if receivedErr.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", receivedErr.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryWithErrorReceived confirms that, when passing an error type
+// using logrus.Fields, an HTTP server emulating an Airbrake endpoint receives
+// the error message returned by the Error() method on the error interface
+// rather than the logrus.Entry.Message string.
+func TestLogEntryWithErrorReceived(t *testing.T) {
+ log := logrus.New()
+ hook := newTestHook()
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": &customErr{expectedMsg},
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeChan:
+ receivedErr := received.Errors[0]
+ if receivedErr.Message != expectedMsg {
+ t.Errorf("Unexpected message received: %s", receivedErr.Message)
+ }
+ if receivedErr.Type != expectedClass {
+ t.Errorf("Unexpected error class: %s", receivedErr.Type)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// TestLogEntryWithNonErrorTypeNotReceived confirms that, when passing a
+// non-error type using logrus.Fields, an HTTP server emulating an Airbrake
+// endpoint receives the logrus.Entry.Message string.
+//
+// Only error types are supported when setting the 'error' field using
+// logrus.WithFields().
+func TestLogEntryWithNonErrorTypeNotReceived(t *testing.T) {
+ log := logrus.New()
+ hook := newTestHook()
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "error": expectedMsg,
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeChan:
+ receivedErr := received.Errors[0]
+ if receivedErr.Message != unintendedMsg {
+ t.Errorf("Unexpected message received: %s", receivedErr.Message)
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+func TestLogEntryWithCustomFields(t *testing.T) {
+ log := logrus.New()
+ hook := newTestHook()
+ log.Hooks.Add(hook)
+
+ log.WithFields(logrus.Fields{
+ "user_id": "123",
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeChan:
+ receivedErr := received.Errors[0]
+ if receivedErr.Message != unintendedMsg {
+ t.Errorf("Unexpected message received: %s", receivedErr.Message)
+ }
+ if received.Context["user_id"] != "123" {
+ t.Errorf("Expected message to contain Context[\"user_id\"] == \"123\" got %q", received.Context["user_id"])
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+func TestLogEntryWithHTTPRequestFields(t *testing.T) {
+ log := logrus.New()
+ hook := newTestHook()
+ log.Hooks.Add(hook)
+
+ req, err := http.NewRequest("GET", "http://example.com", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.WithFields(logrus.Fields{
+ "user_id": "123",
+ "request": req,
+ }).Error(unintendedMsg)
+
+ select {
+ case received := <-noticeChan:
+ receivedErr := received.Errors[0]
+ if receivedErr.Message != unintendedMsg {
+ t.Errorf("Unexpected message received: %s", receivedErr.Message)
+ }
+ if received.Context["user_id"] != "123" {
+ t.Errorf("Expected message to contain Context[\"user_id\"] == \"123\" got %q", received.Context["user_id"])
+ }
+ if received.Context["url"] != "http://example.com" {
+ t.Errorf("Expected message to contain Context[\"url\"] == \"http://example.com\" got %q", received.Context["url"])
+ }
+ case <-time.After(time.Second):
+ t.Error("Timed out; no notice received by Airbrake API")
+ }
+}
+
+// newTestHook returns an airbrakeHook whose requests are served by the fake transport.
+func newTestHook() *airbrakeHook {
+ // Make an http.Client that routes requests through the fake transport
+ httpClient := &http.Client{Transport: &FakeRoundTripper{}}
+
+ hook := NewHook(123, testAPIKey, "production")
+ hook.Airbrake.Client = httpClient
+ return hook
+}
+
+// The gobrake API doesn't allow overriding the endpoint, so requests are
+// intercepted with a fake http.RoundTripper instead.
+type FakeRoundTripper struct{}
+
+func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ notice := &gobrake.Notice{}
+ err = json.Unmarshal(b, notice)
+ if err != nil {
+ panic(err)
+ }
+
+ noticeChan <- notice
+
+ jsonResponse := struct {
+ Id string `json:"id"`
+ }{"1"}
+
+ sendResponse, _ := json.Marshal(jsonResponse)
+ res := &http.Response{
+ StatusCode: http.StatusCreated,
+ Body: ioutil.NopCloser(bytes.NewReader(sendResponse)),
+ Header: make(http.Header),
+ }
+ return res, nil
+}