diff --git a/.travis.yml b/.travis.yml
index 4c7b77b1..1bedc942 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,14 +5,22 @@ language: go
go:
- 1.9.x
- 1.10.x
-- master
env:
- MYSQL_IMAGE=mysql/mysql-server:5.5
- MYSQL_IMAGE=mysql/mysql-server:5.6
- MYSQL_IMAGE=mysql/mysql-server:5.7
- MYSQL_IMAGE=mysql/mysql-server:8.0
+- MYSQL_IMAGE=mariadb:5.5
+- MYSQL_IMAGE=mariadb:10.0
+- MYSQL_IMAGE=mariadb:10.1
+- MYSQL_IMAGE=mariadb:10.2
+- MYSQL_IMAGE=mariadb:10.3
+- MYSQL_IMAGE=percona/percona-server:5.6
- MYSQL_IMAGE=percona/percona-server:5.7
+- MYSQL_IMAGE=percona:5.5
+- MYSQL_IMAGE=percona:5.6
+- MYSQL_IMAGE=percona:5.7
services:
- docker
diff --git a/Gopkg.lock b/Gopkg.lock
new file mode 100644
index 00000000..d79654a8
--- /dev/null
+++ b/Gopkg.lock
@@ -0,0 +1,149 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
+
+[[projects]]
+ name = "github.com/go-sql-driver/mysql"
+ packages = ["."]
+ revision = "d523deb1b23d913de5bdada721a6071e71283618"
+ version = "v1.4.0"
+
+[[projects]]
+ name = "github.com/golang/protobuf"
+ packages = ["proto"]
+ revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+ version = "v1.1.0"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/gopherjs/gopherjs"
+ packages = ["js"]
+ revision = "8dffc02ea1cb8398bb73f30424697c60fcf8d4c5"
+
+[[projects]]
+ name = "github.com/jtolds/gls"
+ packages = ["."]
+ revision = "77f18212c9c7edc9bd6a33d383a7b545ce62f064"
+ version = "v4.2.1"
+
+[[projects]]
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
+[[projects]]
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/promhttp"
+ ]
+ revision = "c5b7fccd204277076155f10851dad72b76a49317"
+ version = "v0.8.0"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
+
+[[projects]]
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "log",
+ "model",
+ "version"
+ ]
+ revision = "185c63bfc5a8c7a703687edb52940c895c818cb5"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/util",
+ "nfs",
+ "xfs"
+ ]
+ revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
+
+[[projects]]
+ name = "github.com/sirupsen/logrus"
+ packages = ["."]
+ revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
+ version = "v1.0.5"
+
+[[projects]]
+ name = "github.com/smartystreets/assertions"
+ packages = [
+ ".",
+ "internal/go-render/render",
+ "internal/oglematchers"
+ ]
+ revision = "7678a5452ebea5b7090a6b163f844c133f523da2"
+ version = "1.8.3"
+
+[[projects]]
+ name = "github.com/smartystreets/goconvey"
+ packages = [
+ "convey",
+ "convey/gotest",
+ "convey/reporting"
+ ]
+ revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857"
+ version = "1.6.3"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/crypto"
+ packages = ["ssh/terminal"]
+ revision = "a8fb68e7206f8c78be19b432c58eb52a6aa34462"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sys"
+ packages = [
+ "unix",
+ "windows",
+ "windows/registry",
+ "windows/svc/eventlog"
+ ]
+ revision = "8014b7b116a67fea23fbb82cd834c9ad656ea44b"
+
+[[projects]]
+ name = "google.golang.org/appengine"
+ packages = ["cloudsql"]
+ revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "gopkg.in/DATA-DOG/go-sqlmock.v1"
+ packages = ["."]
+ revision = "d76b18b42f285b792bf985118980ce9eacea9d10"
+ version = "v1.3.0"
+
+[[projects]]
+ name = "gopkg.in/ini.v1"
+ packages = ["."]
+ revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
+ version = "v1.37.0"
+
+[[projects]]
+ name = "gopkg.in/yaml.v2"
+ packages = ["."]
+ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+ version = "v2.2.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ba8e3ba3779e577e225314fa6a7bad3122b6654be1afaec679241f1a86b8c869"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
new file mode 100644
index 00000000..2e0d674f
--- /dev/null
+++ b/Gopkg.toml
@@ -0,0 +1,10 @@
+[prune]
+ go-tests = true
+ non-go = true
+ unused-packages = true
+
+[[override]]
+ name = "github.com/prometheus/common"
+ # Version before introducing kingpin.
+ # https://github.com/prometheus/common/pull/96
+ revision = "185c63bfc5a8c7a703687edb52940c895c818cb5"
diff --git a/Makefile b/Makefile
index 268af1ec..00d1d28c 100644
--- a/Makefile
+++ b/Makefile
@@ -22,7 +22,7 @@ DOCKER_IMAGE_NAME ?= mysqld-exporter
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
-all: format build test-short
+all: verify-vendor format build test-short
style:
@echo ">> checking code style"
@@ -36,6 +36,13 @@ test:
@echo ">> running tests"
@$(GO) test -race $(pkgs)
+verify-vendor:
+ @echo ">> verify that vendor/ is in sync with code and Gopkg.*"
+ curl https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -L -o ~/dep && chmod +x ~/dep
+ rm -fr vendor/
+ ~/dep ensure -v -vendor-only
+ git diff --exit-code
+
format:
@echo ">> formatting code"
@$(GO) fmt $(pkgs)
diff --git a/collector/info_schema_innodb_cmp.go b/collector/info_schema_innodb_cmp.go
new file mode 100644
index 00000000..1811bb77
--- /dev/null
+++ b/collector/info_schema_innodb_cmp.go
@@ -0,0 +1,117 @@
+// Scrape `information_schema.client_statistics`.
+
+package collector
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/log"
+)
+
+const innodbCmpQuery = `
+ SELECT
+ page_size, compress_ops, compress_ops_ok, compress_time, uncompress_ops, uncompress_time
+ FROM information_schema.INNODB_CMP
+ `
+
+var (
+ // Map known innodb_cmp values to types. Unknown types will be mapped as
+ // untyped.
+ informationSchemaInnodbCmpTypes = map[string]struct {
+ vtype prometheus.ValueType
+ desc *prometheus.Desc
+ }{
+ "compress_ops": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_total"),
+ "Number of times a B-tree page of the size PAGE_SIZE has been compressed.",
+ []string{"page_size"}, nil)},
+ "compress_ops_ok": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_ops_ok_total"),
+ "Number of times a B-tree page of the size PAGE_SIZE has been successfully compressed.",
+ []string{"page_size"}, nil)},
+ "compress_time": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_compress_time_seconds_total"),
+ "Total time in seconds spent in attempts to compress B-tree pages.",
+ []string{"page_size"}, nil)},
+ "uncompress_ops": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_ops_total"),
+ "Number of times a B-tree page has been uncompressed.",
+ []string{"page_size"}, nil)},
+ "uncompress_time": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmp_uncompress_time_seconds_total"),
+ "Total time in seconds spent in uncompressing B-tree pages.",
+ []string{"page_size"}, nil)},
+ }
+)
+
+// ScrapeInnodbCmp collects from `information_schema.innodb_cmp`.
+type ScrapeInnodbCmp struct{}
+
+// Name of the Scraper.
+func (ScrapeInnodbCmp) Name() string {
+ return "info_schema.innodb_cmp"
+}
+
+// Help returns additional information about Scraper.
+func (ScrapeInnodbCmp) Help() string {
+ return "Please set next variables SET GLOBAL innodb_file_per_table=1;SET GLOBAL innodb_file_format=Barracuda;"
+}
+
+// Version of MySQL from which scraper is available.
+func (ScrapeInnodbCmp) Version() float64 {
+ return 5.5
+}
+
+// Scrape collects data.
+func (ScrapeInnodbCmp) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+ informationSchemaInnodbCmpRows, err := db.Query(innodbCmpQuery)
+ if err != nil {
+ log.Debugln("INNODB_CMP stats are not available.")
+ return err
+ }
+ defer informationSchemaInnodbCmpRows.Close()
+
+ // The client column is assumed to be column[0], while all other data is assumed to be coerceable to float64.
+ // Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics
+ // (because clientStatScanArgs is mapped as [ &client, &clientData[0], &clientData[1] ... &clientdata[n] ]
+ // To map metrics to names therefore we always range over columnNames[1:]
+ columnNames, err := informationSchemaInnodbCmpRows.Columns()
+ if err != nil {
+ log.Debugln("INNODB_CMP stats are not available.")
+ return err
+ }
+
+ var (
+ client string // Holds the client name, which should be in column 0.
+ clientStatData = make([]float64, len(columnNames)-1) // 1 less because of the client column.
+ clientStatScanArgs = make([]interface{}, len(columnNames))
+ )
+
+ clientStatScanArgs[0] = &client
+ for i := range clientStatData {
+ clientStatScanArgs[i+1] = &clientStatData[i]
+ }
+
+ for informationSchemaInnodbCmpRows.Next() {
+ if err := informationSchemaInnodbCmpRows.Scan(clientStatScanArgs...); err != nil {
+ return err
+ }
+
+ // Loop over column names, and match to scan data. Unknown columns
+ // will be filled with an untyped metric number. We assume other then
+ // cient, that we'll only get numbers.
+ for idx, columnName := range columnNames[1:] {
+ if metricType, ok := informationSchemaInnodbCmpTypes[columnName]; ok {
+ ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client)
+ } else {
+ // Unknown metric. Report as untyped.
+ desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("innodb_cmp_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"page_size"}, nil)
+ ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client)
+ }
+ }
+ }
+ return nil
+}
diff --git a/collector/info_schema_innodb_cmp_test.go b/collector/info_schema_innodb_cmp_test.go
new file mode 100644
index 00000000..6fcf143a
--- /dev/null
+++ b/collector/info_schema_innodb_cmp_test.go
@@ -0,0 +1,50 @@
+package collector
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmp(t *testing.T) {
+ db, mock, err := sqlmock.New()
+ if err != nil {
+ t.Fatalf("error opening a stub database connection: %s", err)
+ }
+ defer db.Close()
+
+ columns := []string{"page_size", "compress_ops", "compress_ops_ok", "compress_time", "uncompress_ops", "uncompress_time"}
+ rows := sqlmock.NewRows(columns).
+ AddRow("1024", 10, 20, 30, 40, 50)
+ mock.ExpectQuery(sanitizeQuery(innodbCmpQuery)).WillReturnRows(rows)
+
+ ch := make(chan prometheus.Metric)
+ go func() {
+ if err = (ScrapeInnodbCmp{}).Scrape(db, ch); err != nil {
+ t.Errorf("error calling function on test: %s", err)
+ }
+ close(ch)
+ }()
+
+ expected := []MetricResult{
+ {labels: labelMap{"page_size": "1024"}, value: 10, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024"}, value: 20, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024"}, value: 30, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024"}, value: 40, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024"}, value: 50, metricType: dto.MetricType_COUNTER},
+ }
+ convey.Convey("Metrics comparison", t, func() {
+ for _, expect := range expected {
+ got := readMetric(<-ch)
+ convey.So(expect, convey.ShouldResemble, got)
+ }
+ })
+
+ // Ensure all SQL queries were executed
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("there were unfulfilled expections: %s", err)
+ }
+}
diff --git a/collector/info_schema_innodb_cmpmem.go b/collector/info_schema_innodb_cmpmem.go
new file mode 100644
index 00000000..57cb5960
--- /dev/null
+++ b/collector/info_schema_innodb_cmpmem.go
@@ -0,0 +1,120 @@
+// Scrape `information_schema.innodb_cmpmem`.
+
+package collector
+
+import (
+ "database/sql"
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/log"
+)
+
+const innodbCmpMemQuery = `
+ SELECT
+ page_size, buffer_pool_instance, pages_used, pages_free, relocation_ops, relocation_time
+ FROM information_schema.INNODB_CMPMEM
+ `
+
+//Metric descriptors.
+var (
+ // Map known innodb_cmp values to types. Unknown types will be mapped as
+ // untyped.
+ informationSchemaInnodbCmpMemTypes = map[string]struct {
+ vtype prometheus.ValueType
+ desc *prometheus.Desc
+ }{
+ "pages_used": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_used_total"),
+ "Number of blocks of the size PAGE_SIZE that are currently in use.",
+ []string{"page_size", "buffer"}, nil)},
+ "pages_free": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_pages_free_total"),
+ "Number of blocks of the size PAGE_SIZE that are currently available for allocation.",
+ []string{"page_size", "buffer"}, nil)},
+ "relocation_ops": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_ops_total"),
+ "Number of times a block of the size PAGE_SIZE has been relocated.",
+ []string{"page_size", "buffer"}, nil)},
+ "relocation_time": {prometheus.CounterValue,
+ prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, "innodb_cmpmem_relocation_time_seconds_total"),
+ "Total time in microseconds spent in relocating blocks of the size PAGE_SIZE.",
+ []string{"page_size", "buffer"}, nil)},
+ }
+)
+
+// ScrapeInnodbCmpMem collects from `information_schema.innodb_cmpmem`.
+type ScrapeInnodbCmpMem struct{}
+
+// Name of the Scraper.
+func (ScrapeInnodbCmpMem) Name() string {
+ return "info_schema.innodb_cmpmem"
+}
+
+// Help returns additional information about Scraper.
+func (ScrapeInnodbCmpMem) Help() string {
+ return "Please set next variables SET GLOBAL innodb_file_per_table=1;SET GLOBAL innodb_file_format=Barracuda;"
+}
+
+// Version of MySQL from which scraper is available.
+func (ScrapeInnodbCmpMem) Version() float64 {
+ return 5.5
+}
+
+// Scrape collects data.
+func (ScrapeInnodbCmpMem) Scrape(db *sql.DB, ch chan<- prometheus.Metric) error {
+ informationSchemaInnodbCmpMemRows, err := db.Query(innodbCmpMemQuery)
+ if err != nil {
+ log.Debugln("INNODB_CMPMEM stats are not available.")
+ return err
+ }
+ defer informationSchemaInnodbCmpMemRows.Close()
+
+ // The client column is assumed to be column[0], while all other data is assumed to be coerceable to float64.
+ // Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics
+ // (because clientStatScanArgs is mapped as [ &client, &buffer, &clientData[0], &clientData[1] ... &clientdata[n] ]
+ // To map metrics to names therefore we always range over columnNames[1:]
+ columnNames, err := informationSchemaInnodbCmpMemRows.Columns()
+
+ if err != nil {
+ log.Debugln("INNODB_CMPMEM stats are not available.")
+ return err
+ }
+
+ var (
+ client string // Holds the client name, which should be in column 0.
+ buffer string // Holds the buffer number, which should be in column 1.
+ clientStatData = make([]float64, len(columnNames)-2) // 2 less because of the client column.
+ clientStatScanArgs = make([]interface{}, len(columnNames))
+ )
+
+ clientStatScanArgs[0] = &client
+ clientStatScanArgs[1] = &buffer
+ for i := range clientStatData {
+ clientStatScanArgs[i+2] = &clientStatData[i]
+ }
+
+ for informationSchemaInnodbCmpMemRows.Next() {
+ if err := informationSchemaInnodbCmpMemRows.Scan(clientStatScanArgs...); err != nil {
+ return err
+ }
+ // Loop over column names, and match to scan data. Unknown columns
+ // will be filled with an untyped metric number. We assume other then
+ // cient, that we'll only get numbers.
+ for idx, columnName := range columnNames[2:] {
+ if metricType, ok := informationSchemaInnodbCmpMemTypes[columnName]; ok {
+ if columnName == "relocation_time" {
+ ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]/1000), client, buffer)
+ } else {
+ ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client, buffer)
+ }
+ } else {
+ // Unknown metric. Report as untyped.
+ desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("innodb_cmpmem_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"page_size", "buffer"}, nil)
+ ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client, buffer)
+ }
+ }
+ }
+ return nil
+}
diff --git a/collector/info_schema_innodb_cmpmem_test.go b/collector/info_schema_innodb_cmpmem_test.go
new file mode 100644
index 00000000..f4836b5f
--- /dev/null
+++ b/collector/info_schema_innodb_cmpmem_test.go
@@ -0,0 +1,49 @@
+package collector
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/DATA-DOG/go-sqlmock.v1"
+)
+
+func TestScrapeInnodbCmpMem(t *testing.T) {
+ db, mock, err := sqlmock.New()
+ if err != nil {
+ t.Fatalf("error opening a stub database connection: %s", err)
+ }
+ defer db.Close()
+
+ columns := []string{"page_size", "buffer", "pages_used", "pages_free", "relocation_ops", "relocation_time"}
+ rows := sqlmock.NewRows(columns).
+ AddRow("1024", "0", 30, 40, 50, 60)
+ mock.ExpectQuery(sanitizeQuery(innodbCmpMemQuery)).WillReturnRows(rows)
+
+ ch := make(chan prometheus.Metric)
+ go func() {
+ if err = (ScrapeInnodbCmpMem{}).Scrape(db, ch); err != nil {
+ t.Errorf("error calling function on test: %s", err)
+ }
+ close(ch)
+ }()
+
+ expected := []MetricResult{
+ {labels: labelMap{"page_size": "1024", "buffer": "0"}, value: 30, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024", "buffer": "0"}, value: 40, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024", "buffer": "0"}, value: 50, metricType: dto.MetricType_COUNTER},
+ {labels: labelMap{"page_size": "1024", "buffer": "0"}, value: 0.06, metricType: dto.MetricType_COUNTER},
+ }
+ convey.Convey("Metrics comparison", t, func() {
+ for _, expect := range expected {
+ got := readMetric(<-ch)
+ convey.So(expect, convey.ShouldResemble, got)
+ }
+ })
+
+ // Ensure all SQL queries were executed
+ if err := mock.ExpectationsWereMet(); err != nil {
+ t.Errorf("there were unfulfilled expections: %s", err)
+ }
+}
diff --git a/mysqld_exporter.go b/mysqld_exporter.go
index ecb65aa2..64abb143 100644
--- a/mysqld_exporter.go
+++ b/mysqld_exporter.go
@@ -137,6 +137,8 @@ var scrapers = map[collector.Scraper]bool{
collector.ScrapeEngineTokudbStatus{}: false,
collector.ScrapeEngineInnodbStatus{}: false,
collector.ScrapeHeartbeat{}: false,
+ collector.ScrapeInnodbCmp{}: false,
+ collector.ScrapeInnodbCmpMem{}: false,
}
var scrapersHr = map[collector.Scraper]struct{}{
@@ -152,6 +154,8 @@ var scrapersMr = map[collector.Scraper]struct{}{
collector.ScrapePerfTableLockWaits{}: {},
collector.ScrapeQueryResponseTime{}: {},
collector.ScrapeEngineInnodbStatus{}: {},
+ collector.ScrapeInnodbCmp{}: {},
+ collector.ScrapeInnodbCmpMem{}: {},
}
var scrapersLr = map[collector.Scraper]struct{}{
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index cc58f645..00000000
--- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# 1.0.4
-
-* Fix race when adding hooks (#612)
-* Fix terminal check in AppEngine (#635)
-
-# 1.0.3
-
-* Replace example files with testable examples
-
-# 1.0.2
-
-* bug: quote non-string values in text formatter (#583)
-* Make (*Logger) SetLevel a public method
-
-# 1.0.1
-
-* bug: fix escaping in text formatter (#575)
-
-# 1.0.0
-
-* Officially changed name to lower-case
-* bug: colors on Windows 10 (#541)
-* bug: fix race in accessing level (#512)
-
-# 0.11.5
-
-* feature: add writer and writerlevel to entry (#372)
-
-# 0.11.4
-
-* bug: fix undefined variable on solaris (#493)
-
-# 0.11.3
-
-* formatter: configure quoting of empty values (#484)
-* formatter: configure quoting character (default is `"`) (#484)
-* bug: fix not importing io correctly in non-linux environments (#481)
-
-# 0.11.2
-
-* bug: fix windows terminal detection (#476)
-
-# 0.11.1
-
-* bug: fix tty detection with custom out (#471)
-
-# 0.11.0
-
-* performance: Use bufferpool to allocate (#370)
-* terminal: terminal detection for app-engine (#343)
-* feature: exit handler (#375)
-
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 0.8.3
-
-* logrus/core: fix entry log level (#208)
-* logrus/core: improve performance of text formatter by 40%
-* logrus/core: expose `LevelHooks` type
-* logrus/core: add support for DragonflyBSD and NetBSD
-* formatter/text: print structs more verbosely
-
-# 0.8.2
-
-* logrus: fix more Fatal family functions
-
-# 0.8.1
-
-* logrus: fix not exiting on `Fatalf` and `Fatalln`
-
-# 0.8.0
-
-* logrus: defaults to stderr instead of stdout
-* hooks/sentry: add special field for `*http.Request`
-* formatter/text: ignore Windows for colors
-
-# 0.7.3
-
-* formatter/\*: allow configuration of timestamp layout
-
-# 0.7.2
-
-* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
deleted file mode 100644
index 08584b5f..00000000
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ /dev/null
@@ -1,509 +0,0 @@
-# Logrus [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
-
-Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger.
-
-**Seeing weird case-sensitive problems?** It's in the past been possible to
-import Logrus as both upper- and lower-case. Due to the Go package environment,
-this caused issues in the community and we needed a standard. Some environments
-experienced problems with the upper-case variant, so the lower-case was decided.
-Everything using `logrus` will need to use the lower-case:
-`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
-
-To fix Glide, see [these
-comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
-For an in-depth explanation of the casing issue, see [this
-comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
-
-**Are you interested in assisting in maintaining Logrus?** Currently I have a
-lot of obligations, and I am unable to provide Logrus with the maintainership it
-needs. If you'd like to help, please reach out to me at `simon at author's
-username dot com`.
-
-Nicely color-coded in development (when a TTY is attached, otherwise just
-plain text):
-
-![Colored](http://i.imgur.com/PY7qMwd.png)
-
-With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
-or Splunk:
-
-```json
-{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
-ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
-
-{"level":"warning","msg":"The group's number increased tremendously!",
-"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
-"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
-"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
-
-{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
-"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
-```
-
-With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
-attached, the output is compatible with the
-[logfmt](http://godoc.org/github.com/kr/logfmt) format:
-
-```text
-time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
-time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
-time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
-time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
-time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
-time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-exit status 1
-```
-
-#### Case-sensitivity
-
-The organization's name was changed to lower-case--and this will not be changed
-back. If you are getting import conflicts due to case sensitivity, please use
-the lower-case import: `github.com/sirupsen/logrus`.
-
-#### Example
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
-```go
-package main
-
-import (
- log "github.com/sirupsen/logrus"
-)
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- }).Info("A walrus appears")
-}
-```
-
-Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
-and you'll now have the flexibility of Logrus. You can customize it all you
-want:
-
-```go
-package main
-
-import (
- "os"
- log "github.com/sirupsen/logrus"
-)
-
-func init() {
- // Log as JSON instead of the default ASCII formatter.
- log.SetFormatter(&log.JSONFormatter{})
-
- // Output to stdout instead of the default stderr
- // Can be any io.Writer, see below for File example
- log.SetOutput(os.Stdout)
-
- // Only log the warning severity or above.
- log.SetLevel(log.WarnLevel)
-}
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 100,
- }).Fatal("The ice breaks!")
-
- // A common pattern is to re-use fields between logging statements by re-using
- // the logrus.Entry returned from WithFields()
- contextLogger := log.WithFields(log.Fields{
- "common": "this is a common field",
- "other": "I also should be logged always",
- })
-
- contextLogger.Info("I'll be logged with common and other field")
- contextLogger.Info("Me too")
-}
-```
-
-For more advanced usage such as logging to multiple locations from the same
-application, you can also create an instance of the `logrus` Logger:
-
-```go
-package main
-
-import (
- "os"
- "github.com/sirupsen/logrus"
-)
-
-// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
-
-func main() {
- // The API for setting attributes is a little different than the package level
- // exported logger. See Godoc.
- log.Out = os.Stdout
-
- // You could set this to any `io.Writer` such as a file
- // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
- // if err == nil {
- // log.Out = file
- // } else {
- // log.Info("Failed to log to file, using default stderr")
- // }
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-}
-```
-
-#### Fields
-
-Logrus encourages careful, structured logging through logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
-to send event %s to topic %s with key %d")`, you should log the much more
-discoverable:
-
-```go
-log.WithFields(log.Fields{
- "event": event,
- "topic": topic,
- "key": key,
-}).Fatal("Failed to send event")
-```
-
-We've found this API forces you to think about logging in a way that produces
-much more useful logging messages. We've been in countless situations where just
-a single added field to a log statement that was already there would've saved us
-hours. The `WithFields` call is optional.
-
-In general, with Logrus using any of the `printf`-family functions should be
-seen as a hint you should add a field, however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Default Fields
-
-Often it's helpful to have fields _always_ attached to log statements in an
-application or parts of one. For example, you may want to always log the
-`request_id` and `user_ip` in the context of a request. Instead of writing
-`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
-every line, you can create a `logrus.Entry` to pass around instead:
-
-```go
-requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
-requestLogger.Info("something happened on that request") # will log request_id and user_ip
-requestLogger.Warn("something not great happened")
-```
-
-#### Hooks
-
-You can add hooks for logging levels. For example to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
- "log/syslog"
-)
-
-func init() {
-
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
- if err != nil {
- log.Error("Unable to connect to local syslog daemon")
- } else {
- log.AddHook(hook)
- }
-}
-```
-Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-
-| Hook | Description |
-| ----- | ----------- |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
-| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
-| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
-| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
-| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
-| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
-| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
-| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
-| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
-| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
-
-#### Level logging
-
-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`, then it will only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields` some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
- the `AddFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment you
-could do:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
-)
-
-init() {
- // do something here to set environment depending on an environment variable
- // or command-line flag
- if Environment == "production" {
- log.SetFormatter(&log.JSONFormatter{})
- } else {
- // The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{})
- }
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
- without colors.
- * *Note:* to force colored output when there is no TTY, set the `ForceColors`
- field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`. For Windows, see
- [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
-* `logrus.JSONFormatter`. Logs fields as JSON.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
-
-Third party logging formatters:
-
-* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
-* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-log.SetFormatter(new(MyJSONFormatter))
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
- // Note this doesn't include Time, Level and Message which are available on
- // the Entry. Consult `godoc` on information about those fields or read the
- // source of the official loggers.
- serialized, err := json.Marshal(entry.Data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
- }
- return append(serialized, '\n'), nil
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
- // create a stdlib log.Logger that writes to
- // logrus.Logger.
- ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-This means that we can override the standard library logger easily:
-
-```go
-logger := logrus.New()
-logger.Formatter = &logrus.JSONFormatter{}
-
-// Use logrus for standard log output
-// Note that `log` here references stdlib's log
-// Not logrus imported under the name `log`.
-log.SetOutput(logger.Writer())
-```
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-import(
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestSomething(t*testing.T){
- logger, hook := test.NewNullLogger()
- logger.Error("Helloerror")
-
- assert.Equal(t, 1, len(hook.Entries))
- assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
- assert.Equal(t, "Helloerror", hook.LastEntry().Message)
-
- hook.Reset()
- assert.Nil(t, hook.LastEntry())
-}
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
-
-```
-...
-handler := func() {
- // gracefully shutdown something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
-#### Thread safety
-
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situation when locking is not needed includes:
-
-* You have no hooks registered, or hooks calling is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
- 1) logger.Out is protected by locks.
-
- 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
-
- (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml
deleted file mode 100644
index 96c2ce15..00000000
--- a/vendor/github.com/Sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
-branches:
- only:
- - master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
-build_script:
- - go get -t
- - go test
diff --git a/vendor/github.com/alecthomas/template/LICENSE b/vendor/github.com/alecthomas/template/LICENSE
deleted file mode 100644
index 74487567..00000000
--- a/vendor/github.com/alecthomas/template/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/alecthomas/template/README.md b/vendor/github.com/alecthomas/template/README.md
deleted file mode 100644
index ef6a8ee3..00000000
--- a/vendor/github.com/alecthomas/template/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Go's `text/template` package with newline elision
-
-This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
-
-eg.
-
-```
-{{if true}}\
-hello
-{{end}}\
-```
-
-Will result in:
-
-```
-hello\n
-```
-
-Rather than:
-
-```
-\n
-hello\n
-\n
-```
diff --git a/vendor/github.com/alecthomas/template/doc.go b/vendor/github.com/alecthomas/template/doc.go
deleted file mode 100644
index 223c595c..00000000
--- a/vendor/github.com/alecthomas/template/doc.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel.
-
-Here is a trivial example that prints "17 items are made of wool".
-
- type Inventory struct {
- Material string
- Count uint
- }
- sweaters := Inventory{"wool", 17}
- tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
- if err != nil { panic(err) }
- err = tmpl.Execute(os.Stdout, sweaters)
- if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail below.
-
-*/
-// {{/* a comment */}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest and must start and end at the
-// delimiters, as shown here.
-/*
-
- {{pipeline}}
- The default textual representation of the value of the pipeline
- is copied to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
- To simplify the appearance of if-else chains, the else action
- of an if may include another if directly; the effect is exactly
- the same as writing
- {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed. If the value is a map and the
- keys are of basic type with a defined order ("comparable"), the
- elements will be visited in sorted key order.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants, although raw strings may not span newlines.
- - The keyword nil, representing an untyped Go nil.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
- field names they do not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
- - A parenthesized instance of one the above, for grouping. The result
- may be accessed by a field or map key invocation.
- print (.F1 arg1) (.F2 arg2)
- (.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-Pipelines
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of the each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{printf "%q" (print "out" "put")}}
- A parenthesized argument.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument, that is,
- "and x y" behaves as "if x then y else x". All the
- arguments are evaluated.
- call
- Returns the result of calling the first argument, which
- must be a function, with the remaining arguments as parameters.
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
- Y is a func-valued field, map entry, or the like.
- The first argument must be the result of an evaluation
- that yields a value of function type (as distinct from
- a predefined function such as print). The function must
- return either one or two result values, the second of which
- is of type error. If the arguments don't match the function
- or the returned error value is non-nil, execution stops.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y". All the
- arguments are evaluated.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
- eq
- Returns the boolean truth of arg1 == arg2
- ne
- Returns the boolean truth of arg1 != arg2
- lt
- Returns the boolean truth of arg1 < arg2
- le
- Returns the boolean truth of arg1 <= arg2
- gt
- Returns the boolean truth of arg1 > arg2
- ge
- Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
- arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on basic types only (or named basic
-types, such as "type Celsius float32"). They implement the Go rules
-for comparison of values, except that size and exact type are
-ignored, so any integer value, signed or unsigned, may be compared
-with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all
-unsigned integers.) However, as usual, one may not compare an int
-with a float32 and so on.
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
- `{{define "T1"}}ONE{{end}}
- {{define "T2"}}TWO{{end}}
- {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
- {{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
- ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
- err := tmpl.Execute(os.Stdout, "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-or to invoke a particular template explicitly by name,
-
- err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-*/
-package template
diff --git a/vendor/github.com/alecthomas/template/exec.go b/vendor/github.com/alecthomas/template/exec.go
deleted file mode 100644
index c3078e5d..00000000
--- a/vendor/github.com/alecthomas/template/exec.go
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "sort"
- "strings"
-
- "github.com/alecthomas/template/parse"
-)
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
- tmpl *Template
- wr io.Writer
- node parse.Node // current node, for errors
- vars []variable // push-down stack of variable values.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
- name string
- value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
- s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
- return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
- s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setVar(n int, value reflect.Value) {
- s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
- for i := s.mark() - 1; i >= 0; i-- {
- if s.vars[i].name == name {
- return s.vars[i].value
- }
- }
- s.errorf("undefined variable: %s", name)
- return zero
-}
-
-var zero reflect.Value
-
-// at marks the state to be on node n, for error reporting.
-func (s *state) at(node parse.Node) {
- s.node = node
-}
-
-// doublePercent returns the string with %'s replaced by %%, if necessary,
-// so it can be used safely inside a Printf format string.
-func doublePercent(str string) string {
- if strings.Contains(str, "%") {
- str = strings.Replace(str, "%", "%%", -1)
- }
- return str
-}
-
-// errorf formats the error and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
- name := doublePercent(s.tmpl.Name())
- if s.node == nil {
- format = fmt.Sprintf("template: %s: %s", name, format)
- } else {
- location, context := s.tmpl.ErrorContext(s.node)
- format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
- }
- panic(fmt.Errorf(format, args...))
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Parse.
-func errRecover(errp *error) {
- e := recover()
- if e != nil {
- switch err := e.(type) {
- case runtime.Error:
- panic(e)
- case error:
- *errp = err
- default:
- panic(e)
- }
- }
-}
-
-// ExecuteTemplate applies the template associated with t that has the given name
-// to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
- tmpl := t.tmpl[name]
- if tmpl == nil {
- return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
- }
- return tmpl.Execute(wr, data)
-}
-
-// Execute applies a parsed template to the specified data object,
-// and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
- defer errRecover(&err)
- value := reflect.ValueOf(data)
- state := &state{
- tmpl: t,
- wr: wr,
- vars: []variable{{"$", value}},
- }
- t.init()
- if t.Tree == nil || t.Root == nil {
- var b bytes.Buffer
- for name, tmpl := range t.tmpl {
- if tmpl.Tree == nil || tmpl.Root == nil {
- continue
- }
- if b.Len() > 0 {
- b.WriteString(", ")
- }
- fmt.Fprintf(&b, "%q", name)
- }
- var s string
- if b.Len() > 0 {
- s = "; defined templates are: " + b.String()
- }
- state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
- }
- state.walk(value, t.Root)
- return
-}
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, node parse.Node) {
- s.at(node)
- switch node := node.(type) {
- case *parse.ActionNode:
- // Do not pop variables so they persist until next end.
- // Also, if the action declares variables, don't print the result.
- val := s.evalPipeline(dot, node.Pipe)
- if len(node.Pipe.Decl) == 0 {
- s.printValue(node, val)
- }
- case *parse.IfNode:
- s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
- case *parse.ListNode:
- for _, node := range node.Nodes {
- s.walk(dot, node)
- }
- case *parse.RangeNode:
- s.walkRange(dot, node)
- case *parse.TemplateNode:
- s.walkTemplate(dot, node)
- case *parse.TextNode:
- if _, err := s.wr.Write(node.Text); err != nil {
- s.errorf("%s", err)
- }
- case *parse.WithNode:
- s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
- default:
- s.errorf("unknown node: %s", node)
- }
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
- defer s.pop(s.mark())
- val := s.evalPipeline(dot, pipe)
- truth, ok := isTrue(val)
- if !ok {
- s.errorf("if/with can't use %v", val)
- }
- if truth {
- if typ == parse.NodeWith {
- s.walk(val, list)
- } else {
- s.walk(dot, list)
- }
- } else if elseList != nil {
- s.walk(dot, elseList)
- }
-}
-
-// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value.
-func isTrue(val reflect.Value) (truth, ok bool) {
- if !val.IsValid() {
- // Something like var x interface{}, never set. It's a form of nil.
- return false, true
- }
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- truth = val.Len() > 0
- case reflect.Bool:
- truth = val.Bool()
- case reflect.Complex64, reflect.Complex128:
- truth = val.Complex() != 0
- case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
- truth = !val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- truth = val.Int() != 0
- case reflect.Float32, reflect.Float64:
- truth = val.Float() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- truth = val.Uint() != 0
- case reflect.Struct:
- truth = true // Struct values are always true.
- default:
- return
- }
- return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
- s.at(r)
- defer s.pop(s.mark())
- val, _ := indirect(s.evalPipeline(dot, r.Pipe))
- // mark top of stack before any variables in the body are pushed.
- mark := s.mark()
- oneIteration := func(index, elem reflect.Value) {
- // Set top var (lexically the second if there are two) to the element.
- if len(r.Pipe.Decl) > 0 {
- s.setVar(1, elem)
- }
- // Set next var (lexically the first if there are two) to the index.
- if len(r.Pipe.Decl) > 1 {
- s.setVar(2, index)
- }
- s.walk(elem, r.List)
- s.pop(mark)
- }
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- if val.Len() == 0 {
- break
- }
- for i := 0; i < val.Len(); i++ {
- oneIteration(reflect.ValueOf(i), val.Index(i))
- }
- return
- case reflect.Map:
- if val.Len() == 0 {
- break
- }
- for _, key := range sortKeys(val.MapKeys()) {
- oneIteration(key, val.MapIndex(key))
- }
- return
- case reflect.Chan:
- if val.IsNil() {
- break
- }
- i := 0
- for ; ; i++ {
- elem, ok := val.Recv()
- if !ok {
- break
- }
- oneIteration(reflect.ValueOf(i), elem)
- }
- if i == 0 {
- break
- }
- return
- case reflect.Invalid:
- break // An invalid value is likely a nil map, etc. and acts like an empty map.
- default:
- s.errorf("range can't iterate over %v", val)
- }
- if r.ElseList != nil {
- s.walk(dot, r.ElseList)
- }
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
- s.at(t)
- tmpl := s.tmpl.tmpl[t.Name]
- if tmpl == nil {
- s.errorf("template %q not defined", t.Name)
- }
- // Variables declared by the pipeline persist.
- dot = s.evalPipeline(dot, t.Pipe)
- newState := *s
- newState.tmpl = tmpl
- // No dynamic scoping: template invocations inherit no variables.
- newState.vars = []variable{{"$", dot}}
- newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
- if pipe == nil {
- return
- }
- s.at(pipe)
- for _, cmd := range pipe.Cmds {
- value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
- // If the object has type interface{}, dig down one level to the thing inside.
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
- value = reflect.ValueOf(value.Interface()) // lovely!
- }
- }
- for _, variable := range pipe.Decl {
- s.push(variable.Ident[0], value)
- }
- return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
- if len(args) > 1 || final.IsValid() {
- s.errorf("can't give argument to non-function %s", args[0])
- }
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
- firstWord := cmd.Args[0]
- switch n := firstWord.(type) {
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, cmd.Args, final)
- case *parse.ChainNode:
- return s.evalChainNode(dot, n, cmd.Args, final)
- case *parse.IdentifierNode:
- // Must be a function.
- return s.evalFunction(dot, n, cmd, cmd.Args, final)
- case *parse.PipeNode:
- // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
- return s.evalPipeline(dot, n)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, cmd.Args, final)
- }
- s.at(firstWord)
- s.notAFunction(cmd.Args, final)
- switch word := firstWord.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(word.True)
- case *parse.DotNode:
- return dot
- case *parse.NilNode:
- s.errorf("nil is not a command")
- case *parse.NumberNode:
- return s.idealConstant(word)
- case *parse.StringNode:
- return reflect.ValueOf(word.Text)
- }
- s.errorf("can't evaluate command %q", firstWord)
- panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
- // These are ideal constants but we don't know the type
- // and we have no context. (If it was a method argument,
- // we'd know what we need.) The syntax guides us to some extent.
- s.at(constant)
- switch {
- case constant.IsComplex:
- return reflect.ValueOf(constant.Complex128) // incontrovertible.
- case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
- return reflect.ValueOf(constant.Float64)
- case constant.IsInt:
- n := int(constant.Int64)
- if int64(n) != constant.Int64 {
- s.errorf("%s overflows int", constant.Text)
- }
- return reflect.ValueOf(n)
- case constant.IsUint:
- s.errorf("%s overflows int", constant.Text)
- }
- return zero
-}
-
-func isHexConstant(s string) bool {
- return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(field)
- return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
-}
-
-func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(chain)
- // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
- pipe := s.evalArg(dot, nil, chain.Node)
- if len(chain.Field) == 0 {
- s.errorf("internal error: no fields in evalChainNode")
- }
- return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
- // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
- s.at(variable)
- value := s.varValue(variable.Ident[0])
- if len(variable.Ident) == 1 {
- s.notAFunction(args, final)
- return value
- }
- return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
- n := len(ident)
- for i := 0; i < n-1; i++ {
- receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
- }
- // Now if it's a method, it gets the arguments.
- return s.evalField(dot, ident[n-1], node, args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(node)
- name := node.Ident
- function, ok := findFunction(name, s.tmpl)
- if !ok {
- s.errorf("%q is not a defined function", name)
- }
- return s.evalCall(dot, function, cmd, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
- if !receiver.IsValid() {
- return zero
- }
- typ := receiver.Type()
- receiver, _ = indirect(receiver)
- // Unless it's an interface, need to get to a value of type *T to guarantee
- // we see all methods of T and *T.
- ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
- ptr = ptr.Addr()
- }
- if method := ptr.MethodByName(fieldName); method.IsValid() {
- return s.evalCall(dot, method, node, fieldName, args, final)
- }
- hasArgs := len(args) > 1 || final.IsValid()
- // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
- receiver, isNil := indirect(receiver)
- if isNil {
- s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
- }
- switch receiver.Kind() {
- case reflect.Struct:
- tField, ok := receiver.Type().FieldByName(fieldName)
- if ok {
- field := receiver.FieldByIndex(tField.Index)
- if tField.PkgPath != "" { // field is unexported
- s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
- }
- // If it's a function, we must call it.
- if hasArgs {
- s.errorf("%s has arguments but cannot be invoked as function", fieldName)
- }
- return field
- }
- s.errorf("%s is not a field of struct type %s", fieldName, typ)
- case reflect.Map:
- // If it's a map, attempt to use the field name as a key.
- nameVal := reflect.ValueOf(fieldName)
- if nameVal.Type().AssignableTo(receiver.Type().Key()) {
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- return receiver.MapIndex(nameVal)
- }
- }
- s.errorf("can't evaluate field %s in type %s", fieldName, typ)
- panic("not reached")
-}
-
-var (
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
- if args != nil {
- args = args[1:] // Zeroth arg is function name/node; not passed to function.
- }
- typ := fun.Type()
- numIn := len(args)
- if final.IsValid() {
- numIn++
- }
- numFixed := len(args)
- if typ.IsVariadic() {
- numFixed = typ.NumIn() - 1 // last arg is the variadic one.
- if numIn < numFixed {
- s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
- }
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
- s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
- }
- if !goodFunc(typ) {
- // TODO: This could still be a confusing error; maybe goodFunc should provide info.
- s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
- }
- // Build the arg list.
- argv := make([]reflect.Value, numIn)
- // Args must be evaluated. Fixed args first.
- i := 0
- for ; i < numFixed && i < len(args); i++ {
- argv[i] = s.evalArg(dot, typ.In(i), args[i])
- }
- // Now the ... args.
- if typ.IsVariadic() {
- argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
- for ; i < len(args); i++ {
- argv[i] = s.evalArg(dot, argType, args[i])
- }
- }
- // Add final value if necessary.
- if final.IsValid() {
- t := typ.In(typ.NumIn() - 1)
- if typ.IsVariadic() {
- t = t.Elem()
- }
- argv[i] = s.validateType(final, t)
- }
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
- s.at(node)
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
- }
- return result[0]
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-func canBeNil(typ reflect.Type) bool {
- switch typ.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return true
- }
- return false
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
- if !value.IsValid() {
- if typ == nil || canBeNil(typ) {
- // An untyped nil interface{}. Accept as a proper nil value.
- return reflect.Zero(typ)
- }
- s.errorf("invalid value; expected %s", typ)
- }
- if typ != nil && !value.Type().AssignableTo(typ) {
- if value.Kind() == reflect.Interface && !value.IsNil() {
- value = value.Elem()
- if value.Type().AssignableTo(typ) {
- return value
- }
- // fallthrough
- }
- // Does one dereference or indirection work? We could do more, as we
- // do with method receivers, but that gets messy and method receivers
- // are much more constrained, so it makes more sense there than here.
- // Besides, one is almost always all you need.
- switch {
- case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
- value = value.Elem()
- if !value.IsValid() {
- s.errorf("dereference of nil pointer of type %s", typ)
- }
- case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
- value = value.Addr()
- default:
- s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
- }
- }
- return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- switch arg := n.(type) {
- case *parse.DotNode:
- return s.validateType(dot, typ)
- case *parse.NilNode:
- if canBeNil(typ) {
- return reflect.Zero(typ)
- }
- s.errorf("cannot assign nil to %s", typ)
- case *parse.FieldNode:
- return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
- case *parse.VariableNode:
- return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
- case *parse.PipeNode:
- return s.validateType(s.evalPipeline(dot, arg), typ)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, arg, arg, nil, zero)
- case *parse.ChainNode:
- return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
- }
- switch typ.Kind() {
- case reflect.Bool:
- return s.evalBool(typ, n)
- case reflect.Complex64, reflect.Complex128:
- return s.evalComplex(typ, n)
- case reflect.Float32, reflect.Float64:
- return s.evalFloat(typ, n)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return s.evalInteger(typ, n)
- case reflect.Interface:
- if typ.NumMethod() == 0 {
- return s.evalEmptyInterface(dot, n)
- }
- case reflect.String:
- return s.evalString(typ, n)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return s.evalUnsignedInteger(typ, n)
- }
- s.errorf("can't handle %s for arg of type %s", n, typ)
- panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.BoolNode); ok {
- value := reflect.New(typ).Elem()
- value.SetBool(n.True)
- return value
- }
- s.errorf("expected bool; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.StringNode); ok {
- value := reflect.New(typ).Elem()
- value.SetString(n.Text)
- return value
- }
- s.errorf("expected string; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
- value := reflect.New(typ).Elem()
- value.SetInt(n.Int64)
- return value
- }
- s.errorf("expected integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
- value := reflect.New(typ).Elem()
- value.SetUint(n.Uint64)
- return value
- }
- s.errorf("expected unsigned integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
- value := reflect.New(typ).Elem()
- value.SetFloat(n.Float64)
- return value
- }
- s.errorf("expected float; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
- value := reflect.New(typ).Elem()
- value.SetComplex(n.Complex128)
- return value
- }
- s.errorf("expected complex; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
- s.at(n)
- switch n := n.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(n.True)
- case *parse.DotNode:
- return dot
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, nil, zero)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, n, n, nil, zero)
- case *parse.NilNode:
- // NilNode is handled in evalArg, the only place that calls here.
- s.errorf("evalEmptyInterface: nil (can't happen)")
- case *parse.NumberNode:
- return s.idealConstant(n)
- case *parse.StringNode:
- return reflect.ValueOf(n.Text)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, nil, zero)
- case *parse.PipeNode:
- return s.evalPipeline(dot, n)
- }
- s.errorf("can't handle assignment of %s to empty interface argument", n)
- panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
- for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
- if v.IsNil() {
- return v, true
- }
- if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
- break
- }
- }
- return v, false
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
- s.at(n)
- iface, ok := printableValue(v)
- if !ok {
- s.errorf("can't print %s of type %s", n, v.Type())
- }
- fmt.Fprint(s.wr, iface)
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-func printableValue(v reflect.Value) (interface{}, bool) {
- if v.Kind() == reflect.Ptr {
- v, _ = indirect(v) // fmt.Fprint handles nil.
- }
- if !v.IsValid() {
- return "", true
- }
-
- if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
- if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
- v = v.Addr()
- } else {
- switch v.Kind() {
- case reflect.Chan, reflect.Func:
- return nil, false
- }
- }
- }
- return v.Interface(), true
-}
-
-// Types to help sort the keys in a map for reproducible output.
-
-type rvs []reflect.Value
-
-func (x rvs) Len() int { return len(x) }
-func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-type rvInts struct{ rvs }
-
-func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
-
-type rvUints struct{ rvs }
-
-func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
-
-type rvFloats struct{ rvs }
-
-func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
-
-type rvStrings struct{ rvs }
-
-func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
-
-// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
-func sortKeys(v []reflect.Value) []reflect.Value {
- if len(v) <= 1 {
- return v
- }
- switch v[0].Kind() {
- case reflect.Float32, reflect.Float64:
- sort.Sort(rvFloats{v})
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- sort.Sort(rvInts{v})
- case reflect.String:
- sort.Sort(rvStrings{v})
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- sort.Sort(rvUints{v})
- }
- return v
-}
diff --git a/vendor/github.com/alecthomas/template/funcs.go b/vendor/github.com/alecthomas/template/funcs.go
deleted file mode 100644
index 39ee5ed6..00000000
--- a/vendor/github.com/alecthomas/template/funcs.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-type FuncMap map[string]interface{}
-
-var builtins = FuncMap{
- "and": and,
- "call": call,
- "html": HTMLEscaper,
- "index": index,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-
- // Comparisons
- "eq": eq, // ==
- "ge": ge, // >=
- "gt": gt, // >
- "le": le, // <=
- "lt": lt, // <
- "ne": ne, // !=
-}
-
-var builtinFuncs = createValueFuncs(builtins)
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc checks that the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == errorType:
- return true
- }
- return false
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
- if tmpl != nil && tmpl.common != nil {
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if fn := builtinFuncs[name]; fn.IsValid() {
- return fn, true
- }
- return reflect.Value{}, false
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item interface{}, indices ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(item)
- for _, i := range indices {
- index := reflect.ValueOf(i)
- var isNil bool
- if v, isNil = indirect(v); isNil {
- return nil, fmt.Errorf("index of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- default:
- return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || x >= int64(v.Len()) {
- return nil, fmt.Errorf("index out of range: %d", x)
- }
- v = v.Index(int(x))
- case reflect.Map:
- if !index.IsValid() {
- index = reflect.Zero(v.Type().Key())
- }
- if !index.Type().AssignableTo(v.Type().Key()) {
- return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
- }
- if x := v.MapIndex(index); x.IsValid() {
- v = x
- } else {
- v = reflect.Zero(v.Type().Elem())
- }
- default:
- return nil, fmt.Errorf("can't index item of type %s", v.Type())
- }
- }
- return v.Interface(), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item interface{}) (int, error) {
- v, isNil := indirect(reflect.ValueOf(item))
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", v.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn interface{}, args ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(fn)
- typ := v.Type()
- if typ.Kind() != reflect.Func {
- return nil, fmt.Errorf("non-function of type %s", typ)
- }
- if !goodFunc(typ) {
- return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
- }
- numIn := typ.NumIn()
- var dddType reflect.Type
- if typ.IsVariadic() {
- if len(args) < numIn-1 {
- return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
- }
- dddType = typ.In(numIn - 1).Elem()
- } else {
- if len(args) != numIn {
- return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
- }
- }
- argv := make([]reflect.Value, len(args))
- for i, arg := range args {
- value := reflect.ValueOf(arg)
- // Compute the expected type. Clumsy because of variadics.
- var argType reflect.Type
- if !typ.IsVariadic() || i < numIn-1 {
- argType = typ.In(i)
- } else {
- argType = dddType
- }
- if !value.IsValid() && canBeNil(argType) {
- value = reflect.Zero(argType)
- }
- if !value.Type().AssignableTo(argType) {
- return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
- }
- argv[i] = value
- }
- result := v.Call(argv)
- if len(result) == 2 && !result[1].IsNil() {
- return result[0].Interface(), result[1].Interface().(error)
- }
- return result[0].Interface(), nil
-}
-
-// Boolean logic.
-
-func truth(a interface{}) bool {
- t, _ := isTrue(reflect.ValueOf(a))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 interface{}, args ...interface{}) interface{} {
- if !truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if !truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 interface{}, args ...interface{}) interface{} {
- if truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg interface{}) (truth bool) {
- truth, _ = isTrue(reflect.ValueOf(arg))
- return !truth
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
- errBadComparisonType = errors.New("invalid type for comparison")
- errBadComparison = errors.New("incompatible types for comparison")
- errNoComparison = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
- invalidKind kind = iota
- boolKind
- complexKind
- intKind
- floatKind
- integerKind
- stringKind
- uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
- switch v.Kind() {
- case reflect.Bool:
- return boolKind, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return intKind, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return uintKind, nil
- case reflect.Float32, reflect.Float64:
- return floatKind, nil
- case reflect.Complex64, reflect.Complex128:
- return complexKind, nil
- case reflect.String:
- return stringKind, nil
- }
- return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- if len(arg2) == 0 {
- return false, errNoComparison
- }
- for _, arg := range arg2 {
- v2 := reflect.ValueOf(arg)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind:
- truth = v1.Bool() == v2.Bool()
- case complexKind:
- truth = v1.Complex() == v2.Complex()
- case floatKind:
- truth = v1.Float() == v2.Float()
- case intKind:
- truth = v1.Int() == v2.Int()
- case stringKind:
- truth = v1.String() == v2.String()
- case uintKind:
- truth = v1.Uint() == v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- if truth {
- return true, nil
- }
- }
- return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 interface{}) (bool, error) {
- // != is the inverse of ==.
- equal, err := eq(arg1, arg2)
- return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- v2 := reflect.ValueOf(arg2)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind, complexKind:
- return false, errBadComparisonType
- case floatKind:
- truth = v1.Float() < v2.Float()
- case intKind:
- truth = v1.Int() < v2.Int()
- case stringKind:
- truth = v1.String() < v2.String()
- case uintKind:
- truth = v1.Uint() < v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- return truth, nil
-}
-
-// le evaluates the comparison <= b.
-func le(arg1, arg2 interface{}) (bool, error) {
- // <= is < or ==.
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte(""") // shorter than """
- htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5
- htmlAmp = []byte("&")
- htmlLt = []byte("<")
- htmlGt = []byte(">")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
- } // else left fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}
diff --git a/vendor/github.com/alecthomas/template/helper.go b/vendor/github.com/alecthomas/template/helper.go
deleted file mode 100644
index 3636fb54..00000000
--- a/vendor/github.com/alecthomas/template/helper.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the (base) name and
-// (parsed) contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-func ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(nil, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(t, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, filenames ...string) (*Template, error) {
- if len(filenames) == 0 {
- // Not really a problem, but be consistent.
- return nil, fmt.Errorf("template: no files named in call to ParseFiles")
- }
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- s := string(b)
- name := filepath.Base(filename)
- // First template becomes return value if not already defined,
- // and we use that one for subsequent New calls to associate
- // all the templates together. Also, if this file has the same name
- // as t, this file becomes the contents of t, so
- // t, err := New(name).Funcs(xxx).ParseFiles(name)
- // works. Otherwise we create a new template associated with t.
- var tmpl *Template
- if t == nil {
- t = New(name)
- }
- if name == t.Name() {
- tmpl = t
- } else {
- tmpl = t.New(name)
- }
- _, err = tmpl.Parse(s)
- if err != nil {
- return nil, err
- }
- }
- return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from the
-// files identified by the pattern, which must match at least one file. The
-// returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-func ParseGlob(pattern string) (*Template, error) {
- return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The pattern is
-// processed by filepath.Glob and must match at least one file. ParseGlob is
-// equivalent to calling t.ParseFiles with the list of files matched by the
-// pattern.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
- return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- return parseFiles(t, filenames...)
-}
diff --git a/vendor/github.com/alecthomas/template/parse/lex.go b/vendor/github.com/alecthomas/template/parse/lex.go
deleted file mode 100644
index 55f1c051..00000000
--- a/vendor/github.com/alecthomas/template/parse/lex.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType // The type of this item.
- pos Pos // The starting position, in bytes, of this item in the input string.
- val string // The value of this item.
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemColonEquals // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier starting with '.'
- itemIdentifier // alphanumeric identifier not starting with '.'
- itemLeftDelim // left action delimiter
- itemLeftParen // '(' inside action
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemElideNewline // elide newline after right delim
- itemRightParen // ')' inside action
- itemSpace // run of spaces separating arguments
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemDot // the cursor, spelled '.'
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemNil // the untyped nil constant, easiest to treat as a keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-var key = map[string]itemType{
- ".": itemDot,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "nil": itemNil,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports
- input string // the string being scanned
- leftDelim string // start of action
- rightDelim string // end of action
- state stateFn // the next lexing function to enter
- pos Pos // current position in the input
- start Pos // start position of this item
- width Pos // width of last rune read from input
- lastPos Pos // position of most recent item returned by nextItem
- items chan item // channel of scanned items
- parenDepth int // nesting depth of ( ) exprs
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
- if int(l.pos) >= len(l.input) {
- l.width = 0
- return eof
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = Pos(w)
- l.pos += l.width
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.start, l.input[l.start:l.pos]}
- l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.IndexRune(valid, l.next()) >= 0 {
- }
- l.backup()
-}
-
-// lineNumber reports which line we're on, based on the position of
-// the previous item returned by nextItem. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
- return 1 + strings.Count(l.input[:l.lastPos], "\n")
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
- return nil
-}
-
-// nextItem returns the next item from the input.
-func (l *lexer) nextItem() item {
- item := <-l.items
- l.lastPos = item.pos
- return item
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- items: make(chan item),
- }
- go l.run()
- return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
- for l.state = lexText; l.state != nil; {
- l.state = l.state(l)
- }
-}
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- for {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
- if l.pos > l.start {
- l.emit(itemText)
- }
- return lexLeftDelim
- }
- if l.next() == eof {
- break
- }
- }
- // Correctly reached EOF.
- if l.pos > l.start {
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present.
-func lexLeftDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.leftDelim))
- if strings.HasPrefix(l.input[l.pos:], leftComment) {
- return lexComment
- }
- l.emit(itemLeftDelim)
- l.parenDepth = 0
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- l.pos += Pos(len(leftComment))
- i := strings.Index(l.input[l.pos:], rightComment)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += Pos(i + len(rightComment))
- if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- return l.errorf("comment ends before closing delimiter")
-
- }
- l.pos += Pos(len(l.rightDelim))
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.rightDelim))
- l.emit(itemRightDelim)
- if l.peek() == '\\' {
- l.pos++
- l.emit(itemElideNewline)
- }
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate arguments; runs of spaces turn into itemSpace.
- // Pipe symbols separate and are emitted.
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- if l.parenDepth == 0 {
- return lexRightDelim
- }
- return l.errorf("unclosed left paren")
- }
- switch r := l.next(); {
- case r == eof || isEndOfLine(r):
- return l.errorf("unclosed action")
- case isSpace(r):
- return lexSpace
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemColonEquals)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexVariable
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < Pos(len(l.input)) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexField
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r == '(':
- l.emit(itemLeftParen)
- l.parenDepth++
- return lexInsideAction
- case r == ')':
- l.emit(itemRightParen)
- l.parenDepth--
- if l.parenDepth < 0 {
- return l.errorf("unexpected right paren %#U", r)
- }
- return lexInsideAction
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- return lexInsideAction
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// One space has already been seen.
-func lexSpace(l *lexer) stateFn {
- for isSpace(l.peek()) {
- l.next()
- }
- l.emit(itemSpace)
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- switch {
- case key[word] > itemKeyword:
- l.emit(key[word])
- case word[0] == '.':
- l.emit(itemField)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
- return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "$".
- l.emit(itemVariable)
- return lexInsideAction
- }
- return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "." or "$".
- if typ == itemVariable {
- l.emit(itemVariable)
- } else {
- l.emit(itemDot)
- }
- return lexInsideAction
- }
- var r rune
- for {
- r = l.next()
- if !isAlphaNumeric(r) {
- l.backup()
- break
- }
- }
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- l.emit(typ)
- return lexInsideAction
-}
-
-// atTerminator reports whether the input is at a valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
- r := l.peek()
- if isSpace(r) || isEndOfLine(r) {
- return true
- }
- switch r {
- case eof, '.', ',', '|', ':', ')', '(':
- return true
- }
- // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
- // succeed but should fail) but only in extremely rare cases caused by willfully
- // bad choice of delimiter.
- if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
- return true
- }
- return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789"
- if l.accept("0") && l.accept("xX") {
- digits = "0123456789abcdefABCDEF"
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof, '\n':
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t'
-}
-
-// isEndOfLine reports whether r is an end-of-line character.
-func isEndOfLine(r rune) bool {
- return r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
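The file removed above is a state-function lexer: each stateFn scans some input, emits item values on a channel, and returns the next state until nil stops the loop. The sketch below reimplements that pattern in miniature, independent of the deleted package; the two-token grammar and names such as itemAction are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// item carries a token kind plus the exact slice of input it covers,
// the same shape used by the deleted lexer.
type itemType int

const (
	itemText itemType = iota
	itemAction
	itemEOF
)

type item struct {
	typ itemType
	val string
}

// stateFn is the heart of the pattern: scan, emit, return the next state.
type stateFn func(*lexer) stateFn

type lexer struct {
	input string
	start int
	pos   int
	items chan item
}

func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.input[l.start:l.pos]}
	l.start = l.pos
}

func (l *lexer) run() {
	for state := lexText; state != nil; {
		state = state(l)
	}
	close(l.items)
}

// lexText scans plain text until "{{", then hands control to lexAction.
func lexText(l *lexer) stateFn {
	if i := strings.Index(l.input[l.pos:], "{{"); i >= 0 {
		l.pos += i
		if l.pos > l.start {
			l.emit(itemText)
		}
		return lexAction
	}
	l.pos = len(l.input)
	if l.pos > l.start {
		l.emit(itemText)
	}
	l.emit(itemEOF)
	return nil
}

// lexAction treats "{{...}}" as one opaque token; the real lexer breaks
// the inside into identifiers, fields, strings, numbers, and so on.
func lexAction(l *lexer) stateFn {
	end := strings.Index(l.input[l.pos:], "}}")
	if end < 0 {
		// The real code emits an error item here; keep the sketch simple.
		l.pos = len(l.input)
		l.emit(itemText)
		l.emit(itemEOF)
		return nil
	}
	l.pos += end + len("}}")
	l.emit(itemAction)
	return lexText
}

func main() {
	l := &lexer{input: "Hello, {{.Name}}!", items: make(chan item)}
	go l.run()
	for it := range l.items {
		fmt.Printf("%d %q\n", it.typ, it.val)
	}
}
```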
diff --git a/vendor/github.com/alecthomas/template/parse/node.go b/vendor/github.com/alecthomas/template/parse/node.go
deleted file mode 100644
index 55c37f6d..00000000
--- a/vendor/github.com/alecthomas/template/parse/node.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-)
-
-var textFormat = "%s" // Changed to "%q" in tests for better error messages.
-
-// A Node is an element in the parse tree. The interface is trivial.
-// The interface contains an unexported method so that only
-// types local to this package can satisfy it.
-type Node interface {
- Type() NodeType
- String() string
- // Copy does a deep copy of the Node and all its components.
- // To avoid type assertions, some XxxNodes also have specialized
- // CopyXxx methods that return *XxxNode.
- Copy() Node
- Position() Pos // byte position of start of node in full original input string
- // tree returns the containing *Tree.
- // It is unexported so all implementations of Node are in this package.
- tree() *Tree
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
- return p
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-const (
- NodeText NodeType = iota // Plain text.
- NodeAction // A non-control action such as a field evaluation.
- NodeBool // A boolean constant.
- NodeChain // A sequence of field accesses.
- NodeCommand // An element of a pipeline.
- NodeDot // The cursor, dot.
- nodeElse // An else action. Not added to tree.
- nodeEnd // An end action. Not added to tree.
- NodeField // A field or method name.
- NodeIdentifier // An identifier; always a function name.
- NodeIf // An if action.
- NodeList // A list of Nodes.
- NodeNil // An untyped nil constant.
- NodeNumber // A numerical constant.
- NodePipe // A pipeline of commands.
- NodeRange // A range action.
- NodeString // A string constant.
- NodeTemplate // A template invocation action.
- NodeVariable // A $ variable.
- NodeWith // A with action.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
- NodeType
- Pos
- tr *Tree
- Nodes []Node // The element nodes in lexical order.
-}
-
-func (t *Tree) newList(pos Pos) *ListNode {
- return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
-}
-
-func (l *ListNode) append(n Node) {
- l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) tree() *Tree {
- return l.tr
-}
-
-func (l *ListNode) String() string {
- b := new(bytes.Buffer)
- for _, n := range l.Nodes {
- fmt.Fprint(b, n)
- }
- return b.String()
-}
-
-func (l *ListNode) CopyList() *ListNode {
- if l == nil {
- return l
- }
- n := l.tr.newList(l.Pos)
- for _, elem := range l.Nodes {
- n.append(elem.Copy())
- }
- return n
-}
-
-func (l *ListNode) Copy() Node {
- return l.CopyList()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
- NodeType
- Pos
- tr *Tree
- Text []byte // The text; may span newlines.
-}
-
-func (t *Tree) newText(pos Pos, text string) *TextNode {
- return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
- return fmt.Sprintf(textFormat, t.Text)
-}
-
-func (t *TextNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TextNode) Copy() Node {
- return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Decl []*VariableNode // Variable declarations in lexical order.
- Cmds []*CommandNode // The commands in lexical order.
-}
-
-func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
- return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
- p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
- s := ""
- if len(p.Decl) > 0 {
- for i, v := range p.Decl {
- if i > 0 {
- s += ", "
- }
- s += v.String()
- }
- s += " := "
- }
- for i, c := range p.Cmds {
- if i > 0 {
- s += " | "
- }
- s += c.String()
- }
- return s
-}
-
-func (p *PipeNode) tree() *Tree {
- return p.tr
-}
-
-func (p *PipeNode) CopyPipe() *PipeNode {
- if p == nil {
- return p
- }
- var decl []*VariableNode
- for _, d := range p.Decl {
- decl = append(decl, d.Copy().(*VariableNode))
- }
- n := p.tr.newPipeline(p.Pos, p.Line, decl)
- for _, c := range p.Cmds {
- n.append(c.Copy().(*CommandNode))
- }
- return n
-}
-
-func (p *PipeNode) Copy() Node {
- return p.CopyPipe()
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline in the action.
-}
-
-func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
- return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
- return fmt.Sprintf("{{%s}}", a.Pipe)
-
-}
-
-func (a *ActionNode) tree() *Tree {
- return a.tr
-}
-
-func (a *ActionNode) Copy() Node {
- return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
- NodeType
- Pos
- tr *Tree
- Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func (t *Tree) newCommand(pos Pos) *CommandNode {
- return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
-}
-
-func (c *CommandNode) append(arg Node) {
- c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
- s := ""
- for i, arg := range c.Args {
- if i > 0 {
- s += " "
- }
- if arg, ok := arg.(*PipeNode); ok {
- s += "(" + arg.String() + ")"
- continue
- }
- s += arg.String()
- }
- return s
-}
-
-func (c *CommandNode) tree() *Tree {
- return c.tr
-}
-
-func (c *CommandNode) Copy() Node {
- if c == nil {
- return c
- }
- n := c.tr.newCommand(c.Pos)
- for _, c := range c.Args {
- n.append(c.Copy())
- }
- return n
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
- NodeType
- Pos
- tr *Tree
- Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
- return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
- i.Pos = pos
- return i
-}
-
-// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
- i.tr = t
- return i
-}
-
-func (i *IdentifierNode) String() string {
- return i.Ident
-}
-
-func (i *IdentifierNode) tree() *Tree {
- return i.tr
-}
-
-func (i *IdentifierNode) Copy() Node {
- return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
-}
-
-// VariableNode holds a list of variable names, possibly with chained field
-// accesses. The dollar sign is part of the (first) name.
-type VariableNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // Variable name and fields in lexical order.
-}
-
-func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
- return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
- s := ""
- for i, id := range v.Ident {
- if i > 0 {
- s += "."
- }
- s += id
- }
- return s
-}
-
-func (v *VariableNode) tree() *Tree {
- return v.tr
-}
-
-func (v *VariableNode) Copy() Node {
- return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
-}
-
-// DotNode holds the special identifier '.'.
-type DotNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newDot(pos Pos) *DotNode {
- return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
-}
-
-func (d *DotNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeDot
-}
-
-func (d *DotNode) String() string {
- return "."
-}
-
-func (d *DotNode) tree() *Tree {
- return d.tr
-}
-
-func (d *DotNode) Copy() Node {
- return d.tr.newDot(d.Pos)
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newNil(pos Pos) *NilNode {
- return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
-}
-
-func (n *NilNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeNil
-}
-
-func (n *NilNode) String() string {
- return "nil"
-}
-
-func (n *NilNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NilNode) Copy() Node {
- return n.tr.newNil(n.Pos)
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newField(pos Pos, ident string) *FieldNode {
- return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
- s := ""
- for _, id := range f.Ident {
- s += "." + id
- }
- return s
-}
-
-func (f *FieldNode) tree() *Tree {
- return f.tr
-}
-
-func (f *FieldNode) Copy() Node {
- return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
- NodeType
- Pos
- tr *Tree
- Node Node
- Field []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
- return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
- if len(field) == 0 || field[0] != '.' {
- panic("no dot in field")
- }
- field = field[1:] // Remove leading dot.
- if field == "" {
- panic("empty field")
- }
- c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
- s := c.Node.String()
- if _, ok := c.Node.(*PipeNode); ok {
- s = "(" + s + ")"
- }
- for _, field := range c.Field {
- s += "." + field
- }
- return s
-}
-
-func (c *ChainNode) tree() *Tree {
- return c.tr
-}
-
-func (c *ChainNode) Copy() Node {
- return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
- NodeType
- Pos
- tr *Tree
- True bool // The value of the boolean constant.
-}
-
-func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
- return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
-}
-
-func (b *BoolNode) String() string {
- if b.True {
- return "true"
- }
- return "false"
-}
-
-func (b *BoolNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BoolNode) Copy() Node {
- return b.tr.newBool(b.Pos, b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
- NodeType
- Pos
- tr *Tree
- IsInt bool // Number has an integral value.
- IsUint bool // Number has an unsigned integral value.
- IsFloat bool // Number has a floating-point value.
- IsComplex bool // Number is complex.
- Int64 int64 // The signed integer value.
- Uint64 uint64 // The unsigned integer value.
- Float64 float64 // The floating-point value.
- Complex128 complex128 // The complex value.
- Text string // The original textual representation from the input.
-}
-
-func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
- n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
- switch typ {
- case itemCharConstant:
- rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
- if err != nil {
- return nil, err
- }
- if tail != "'" {
- return nil, fmt.Errorf("malformed character constant: %s", text)
- }
- n.Int64 = int64(rune)
- n.IsInt = true
- n.Uint64 = uint64(rune)
- n.IsUint = true
- n.Float64 = float64(rune) // odd but those are the rules.
- n.IsFloat = true
- return n, nil
- case itemComplex:
- // fmt.Sscan can parse the pair, so let it do the work.
- if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
- return nil, err
- }
- n.IsComplex = true
- n.simplifyComplex()
- return n, nil
- }
- // Imaginary constants can only be complex unless they are zero.
- if len(text) > 0 && text[len(text)-1] == 'i' {
- f, err := strconv.ParseFloat(text[:len(text)-1], 64)
- if err == nil {
- n.IsComplex = true
- n.Complex128 = complex(0, f)
- n.simplifyComplex()
- return n, nil
- }
- }
- // Do integer test first so we get 0x123 etc.
- u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
- if err == nil {
- n.IsUint = true
- n.Uint64 = u
- }
- i, err := strconv.ParseInt(text, 0, 64)
- if err == nil {
- n.IsInt = true
- n.Int64 = i
- if i == 0 {
- n.IsUint = true // in case of -0.
- n.Uint64 = u
- }
- }
- // If an integer extraction succeeded, promote the float.
- if n.IsInt {
- n.IsFloat = true
- n.Float64 = float64(n.Int64)
- } else if n.IsUint {
- n.IsFloat = true
- n.Float64 = float64(n.Uint64)
- } else {
- f, err := strconv.ParseFloat(text, 64)
- if err == nil {
- n.IsFloat = true
- n.Float64 = f
- // If a floating-point extraction succeeded, extract the int if needed.
- if !n.IsInt && float64(int64(f)) == f {
- n.IsInt = true
- n.Int64 = int64(f)
- }
- if !n.IsUint && float64(uint64(f)) == f {
- n.IsUint = true
- n.Uint64 = uint64(f)
- }
- }
- }
- if !n.IsInt && !n.IsUint && !n.IsFloat {
- return nil, fmt.Errorf("illegal number syntax: %q", text)
- }
- return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
- n.IsFloat = imag(n.Complex128) == 0
- if n.IsFloat {
- n.Float64 = real(n.Complex128)
- n.IsInt = float64(int64(n.Float64)) == n.Float64
- if n.IsInt {
- n.Int64 = int64(n.Float64)
- }
- n.IsUint = float64(uint64(n.Float64)) == n.Float64
- if n.IsUint {
- n.Uint64 = uint64(n.Float64)
- }
- }
-}
-
-func (n *NumberNode) String() string {
- return n.Text
-}
-
-func (n *NumberNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NumberNode) Copy() Node {
- nn := new(NumberNode)
- *nn = *n // Easy, fast, correct.
- return nn
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
- NodeType
- Pos
- tr *Tree
- Quoted string // The original text of the string, with quotes.
- Text string // The string, after quote processing.
-}
-
-func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
- return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
- return s.Quoted
-}
-
-func (s *StringNode) tree() *Tree {
- return s.tr
-}
-
-func (s *StringNode) Copy() Node {
- return s.tr.newString(s.Pos, s.Quoted, s.Text)
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newEnd(pos Pos) *endNode {
- return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
-}
-
-func (e *endNode) String() string {
- return "{{end}}"
-}
-
-func (e *endNode) tree() *Tree {
- return e.tr
-}
-
-func (e *endNode) Copy() Node {
- return e.tr.newEnd(e.Pos)
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
-}
-
-func (t *Tree) newElse(pos Pos, line int) *elseNode {
- return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
- return nodeElse
-}
-
-func (e *elseNode) String() string {
- return "{{else}}"
-}
-
-func (e *elseNode) tree() *Tree {
- return e.tr
-}
-
-func (e *elseNode) Copy() Node {
- return e.tr.newElse(e.Pos, e.Line)
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline to be evaluated.
- List *ListNode // What to execute if the value is non-empty.
- ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
- name := ""
- switch b.NodeType {
- case NodeIf:
- name = "if"
- case NodeRange:
- name = "range"
- case NodeWith:
- name = "with"
- default:
- panic("unknown branch type")
- }
- if b.ElseList != nil {
- return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
- }
- return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
-}
-
-func (b *BranchNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BranchNode) Copy() Node {
- switch b.NodeType {
- case NodeIf:
- return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeRange:
- return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeWith:
- return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- default:
- panic("unknown branch type")
- }
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
- BranchNode
-}
-
-func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
- return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (i *IfNode) Copy() Node {
- return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
- BranchNode
-}
-
-func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
- return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (r *RangeNode) Copy() Node {
- return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
- BranchNode
-}
-
-func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
- return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (w *WithNode) Copy() Node {
- return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Name string // The name of the template (unquoted).
- Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
- return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
- if t.Pipe == nil {
- return fmt.Sprintf("{{template %q}}", t.Name)
- }
- return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
-}
-
-func (t *TemplateNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TemplateNode) Copy() Node {
- return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
-}
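These node types mirror the standard library's text/template/parse, of which this vendored package is a fork. Assuming that equivalence, the stdlib package can show how template text becomes TextNode and ActionNode values and how String() reassembles the source; the template literal below is only an example.

```go
package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	// No funcs are passed because the template calls no functions;
	// fields such as .Name are lexed as fields, not identifiers.
	trees, err := parse.Parse("page", "Hello, {{.Name}}!", "{{", "}}")
	if err != nil {
		panic(err)
	}
	root := trees["page"].Root // the ListNode documented above
	for _, n := range root.Nodes {
		fmt.Printf("%T  %q\n", n, n.String())
	}
	// String() on the root reassembles the original template text.
	fmt.Println(root.String())
}
```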
diff --git a/vendor/github.com/alecthomas/template/parse/parse.go b/vendor/github.com/alecthomas/template/parse/parse.go
deleted file mode 100644
index 0d77ade8..00000000
--- a/vendor/github.com/alecthomas/template/parse/parse.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
- Name string // name of the template represented by the tree.
- ParseName string // name of the top-level template during parsing, for error messages.
- Root *ListNode // top-level root of the tree.
- text string // text parsed to create the template (or its parent)
- // Parsing only; cleared after parse.
- funcs []map[string]interface{}
- lex *lexer
- token [3]item // three-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
-}
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
- if t == nil {
- return nil
- }
- return &Tree{
- Name: t.Name,
- ParseName: t.ParseName,
- Root: t.Root.CopyList(),
- text: t.text,
- }
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
- treeSet = make(map[string]*Tree)
- t := New(name)
- t.text = text
- _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
- return
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
- t.token[1] = t1
- t.token[2] = t2
- t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- t.backup()
- return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
- pos := int(n.Position())
- tree := n.tree()
- if tree == nil {
- tree = t
- }
- text := tree.text[:pos]
- byteNum := strings.LastIndex(text, "\n")
- if byteNum == -1 {
- byteNum = pos // On first line.
- } else {
- byteNum++ // After the newline.
- byteNum = pos - byteNum
- }
- lineNum := 1 + strings.Count(text, "\n")
- context = n.String()
- if len(context) > 20 {
- context = fmt.Sprintf("%.20s...", context)
- }
- return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected {
- t.unexpected(token, context)
- }
- return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected1 && token.typ != expected2 {
- t.unexpected(token, context)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.stopParse()
- }
- *errp = e.(error)
- }
- return
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
- defer t.recover(&err)
- t.ParseName = t.Name
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
- t.text = text
- t.parse(treeSet)
- t.add(treeSet)
- t.stopParse()
- return t, nil
-}
-
-// add adds tree to the treeSet.
-func (t *Tree) add(treeSet map[string]*Tree) {
- tree := treeSet[t.Name]
- if tree == nil || IsEmptyTree(tree.Root) {
- treeSet[t.Name] = t
- return
- }
- if !IsEmptyTree(t.Root) {
- t.errorf("template: multiple definition of template %q", t.Name)
- }
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
-func IsEmptyTree(n Node) bool {
- switch n := n.(type) {
- case nil:
- return true
- case *ActionNode:
- case *IfNode:
- case *ListNode:
- for _, node := range n.Nodes {
- if !IsEmptyTree(node) {
- return false
- }
- }
- return true
- case *RangeNode:
- case *TemplateNode:
- case *TextNode:
- return len(bytes.TrimSpace(n.Text)) == 0
- case *WithNode:
- default:
- panic("unknown node: " + n.String())
- }
- return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
- t.Root = t.newList(t.peek().pos)
- for t.peek().typ != itemEOF {
- if t.peek().typ == itemLeftDelim {
- delim := t.next()
- if t.nextNonSpace().typ == itemDefine {
- newT := New("definition") // name will be updated once we know it.
- newT.text = t.text
- newT.ParseName = t.ParseName
- newT.startParse(t.funcs, t.lex)
- newT.parseDefinition(treeSet)
- continue
- }
- t.backup2(delim)
- }
- n := t.textOrAction()
- if n.Type() == nodeEnd {
- t.errorf("unexpected %s", n)
- }
- t.Root.append(n)
- }
- return nil
-}
-
-// parseDefinition parses a {{define}} ... {{end}} template definition and
-// installs the definition in the treeSet map. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
- const context = "define clause"
- name := t.expectOneOf(itemString, itemRawString, context)
- var err error
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- var end Node
- t.Root, end = t.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.add(treeSet)
- t.stopParse()
-}
-
-// itemList:
-// textOrAction*
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
- list = t.newList(t.peekNonSpace().pos)
- for t.peekNonSpace().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- t.errorf("unexpected EOF")
- return
-}
-
-// textOrAction:
-// text | action
-func (t *Tree) textOrAction() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemElideNewline:
- return t.elideNewline()
- case itemText:
- return t.newText(token.pos, token.val)
- case itemLeftDelim:
- return t.action()
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-// elideNewline:
-// Remove newlines trailing rightDelim if \\ is present.
-func (t *Tree) elideNewline() Node {
- token := t.peek()
- if token.typ != itemText {
- t.unexpected(token, "input")
- return nil
- }
-
- t.next()
- stripped := strings.TrimLeft(token.val, "\n\r")
- diff := len(token.val) - len(stripped)
- if diff > 0 {
- // This is a bit nasty. We mutate the token in-place to remove
- // preceding newlines.
- token.pos += Pos(diff)
- token.val = stripped
- }
- return t.newText(token.pos, token.val)
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.nextNonSpace(); token.typ {
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- // Do not pop variables; they persist until "end".
- return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
-}
-
-// Pipeline:
-// declarations? command ('|' command)*
-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
- var decl []*VariableNode
- pos := t.peekNonSpace().pos
- // Are there declarations?
- for {
- if v := t.peekNonSpace(); v.typ == itemVariable {
- t.next()
- // Since space is a token, we need 3-token look-ahead here in the worst case:
- // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
- // argument variable rather than a declaration. So remember the token
- // adjacent to the variable so we can push it back if necessary.
- tokenAfterVariable := t.peek()
- if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
- t.nextNonSpace()
- variable := t.newVariable(v.pos, v.val)
- decl = append(decl, variable)
- t.vars = append(t.vars, v.val)
- if next.typ == itemChar && next.val == "," {
- if context == "range" && len(decl) < 2 {
- continue
- }
- t.errorf("too many declarations in %s", context)
- }
- } else if tokenAfterVariable.typ == itemSpace {
- t.backup3(v, tokenAfterVariable)
- } else {
- t.backup2(v)
- }
- }
- break
- }
- pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
- for {
- switch token := t.nextNonSpace(); token.typ {
- case itemRightDelim, itemRightParen:
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- if token.typ == itemRightParen {
- t.backup()
- }
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
- defer t.popVars(len(t.vars))
- line = t.lex.lineNumber()
- pipe = t.pipeline(context)
- var next Node
- list, next = t.itemList()
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- if allowElseIf {
- // Special case for "else if". If the "else" is followed immediately by an "if",
- // the elseControl will have left the "if" token pending. Treat
- // {{if a}}_{{else if b}}_{{end}}
- // as
- // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
- // as usual and stop at its {{end}}; the subsequent {{end}}
- // is assumed. This technique works even for long if-else-if chains.
- // TODO: Should we allow else-if in with and range?
- if t.peek().typ == itemIf {
- t.next() // Consume the "if" token.
- elseList = t.newList(next.Position())
- elseList.append(t.ifControl())
- // Do not consume the next item - only one {{end}} required.
- break
- }
- }
- elseList, next = t.itemList()
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return pipe.Position(), line, pipe, list, elseList
-}
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- return t.newRange(t.parseControl(false, "range"))
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// With keyword is past.
-func (t *Tree) withControl() Node {
- return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- // Special case for "else if".
- peek := t.peekNonSpace()
- if peek.typ == itemIf {
- // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
- return t.newElse(peek.pos, t.lex.lineNumber())
- }
- return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- var name string
- token := t.nextNonSpace()
- switch token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, "template invocation")
- }
- var pipe *PipeNode
- if t.nextNonSpace().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline("template")
- }
- return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
-}
-
-// command:
-// operand (space operand)*
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := t.newCommand(t.peekNonSpace().pos)
- for {
- t.peekNonSpace() // skip leading spaces.
- operand := t.operand()
- if operand != nil {
- cmd.append(operand)
- }
- switch token := t.next(); token.typ {
- case itemSpace:
- continue
- case itemError:
- t.errorf("%s", token.val)
- case itemRightDelim, itemRightParen:
- t.backup()
- case itemPipe:
- default:
- t.errorf("unexpected %s in operand; missing space?", token)
- }
- break
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// operand:
-// term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
- node := t.term()
- if node == nil {
- return nil
- }
- if t.peek().typ == itemField {
- chain := t.newChain(t.peek().pos, node)
- for t.peek().typ == itemField {
- chain.Add(t.next().val)
- }
- // Compatibility with original API: If the term is of type NodeField
- // or NodeVariable, just put more fields on the original.
- // Otherwise, keep the Chain node.
- // TODO: Switch to Chains always when we can.
- switch node.Type() {
- case NodeField:
- node = t.newField(chain.Position(), chain.String())
- case NodeVariable:
- node = t.newVariable(chain.Position(), chain.String())
- default:
- node = chain
- }
- }
- return node
-}
-
-// term:
-// literal (number, string, nil, boolean)
-// function (identifier)
-// .
-// .Field
-// $
-// '(' pipeline ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemError:
- t.errorf("%s", token.val)
- case itemIdentifier:
- if !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
- case itemDot:
- return t.newDot(token.pos)
- case itemNil:
- return t.newNil(token.pos)
- case itemVariable:
- return t.useVar(token.pos, token.val)
- case itemField:
- return t.newField(token.pos, token.val)
- case itemBool:
- return t.newBool(token.pos, token.val == "true")
- case itemCharConstant, itemComplex, itemNumber:
- number, err := t.newNumber(token.pos, token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- return number
- case itemLeftParen:
- pipe := t.pipeline("parenthesized pipeline")
- if token := t.next(); token.typ != itemRightParen {
- t.errorf("unclosed right paren: unexpected %s", token)
- }
- return pipe
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- return t.newString(token.pos, token.val, s)
- }
- t.backup()
- return nil
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
- v := t.newVariable(pos, name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
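The Parse/parseDefinition/add path above splits {{define}} blocks into their own trees, keyed by name in the treeSet map. A small sketch against the equivalent standard-library text/template/parse package (assumed here to behave the same way) makes that visible; the template text and names are illustrative.

```go
package main

import (
	"fmt"
	"text/template/parse"
)

func main() {
	const text = `{{define "greeting"}}Hi, {{.}}!{{end}}Body: {{if .Ok}}yes{{else}}no{{end}}`

	// Empty delimiters select the defaults "{{" and "}}".
	trees, err := parse.Parse("root", text, "", "")
	if err != nil {
		panic(err)
	}
	// The {{define}} block becomes its own Tree under "greeting";
	// everything else stays under the top-level name "root".
	for name, tree := range trees {
		fmt.Printf("%-10s %s\n", name, tree.Root.String())
	}
}
```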
diff --git a/vendor/github.com/alecthomas/template/template.go b/vendor/github.com/alecthomas/template/template.go
deleted file mode 100644
index 447ed2ab..00000000
--- a/vendor/github.com/alecthomas/template/template.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "reflect"
-
- "github.com/alecthomas/template/parse"
-)
-
-// common holds the information shared by related templates.
-type common struct {
- tmpl map[string]*Template
- // We use two maps, one for parsing and one for execution.
- // This separation makes the API cleaner since it doesn't
- // expose reflection to the client.
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
-}
-
-// Template is the representation of a parsed template. The *parse.Tree
-// field is exported only for use by html/template and should be treated
-// as unexported by all other clients.
-type Template struct {
- name string
- *parse.Tree
- *common
- leftDelim string
- rightDelim string
-}
-
-// New allocates a new template with the given name.
-func New(name string) *Template {
- return &Template{
- name: name,
- }
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
- return t.name
-}
-
-// New allocates a new template associated with the given one and with the same
-// delimiters. The association, which is transitive, allows one template to
-// invoke another with a {{template}} action.
-func (t *Template) New(name string) *Template {
- t.init()
- return &Template{
- name: name,
- common: t.common,
- leftDelim: t.leftDelim,
- rightDelim: t.rightDelim,
- }
-}
-
-func (t *Template) init() {
- if t.common == nil {
- t.common = new(common)
- t.tmpl = make(map[string]*Template)
- t.parseFuncs = make(FuncMap)
- t.execFuncs = make(map[string]reflect.Value)
- }
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-func (t *Template) Clone() (*Template, error) {
- nt := t.copy(nil)
- nt.init()
- nt.tmpl[t.name] = nt
- for k, v := range t.tmpl {
- if k == t.name { // Already installed.
- continue
- }
- // The associated templates share nt's common structure.
- tmpl := v.copy(nt.common)
- nt.tmpl[k] = tmpl
- }
- for k, v := range t.parseFuncs {
- nt.parseFuncs[k] = v
- }
- for k, v := range t.execFuncs {
- nt.execFuncs[k] = v
- }
- return nt, nil
-}
-
-// copy returns a shallow copy of t, with common set to the argument.
-func (t *Template) copy(c *common) *Template {
- nt := New(t.name)
- nt.Tree = t.Tree
- nt.common = c
- nt.leftDelim = t.leftDelim
- nt.rightDelim = t.rightDelim
- return nt
-}
-
-// AddParseTree creates a new template with the name and parse tree
-// and associates it with t.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
- if t.common != nil && t.tmpl[name] != nil {
- return nil, fmt.Errorf("template: redefinition of template %q", name)
- }
- nt := t.New(name)
- nt.Tree = tree
- t.tmpl[name] = nt
- return nt, nil
-}
-
-// Templates returns a slice of the templates associated with t, including t
-// itself.
-func (t *Template) Templates() []*Template {
- if t.common == nil {
- return nil
- }
- // Return a slice so we don't expose the map.
- m := make([]*Template, 0, len(t.tmpl))
- for _, v := range t.tmpl {
- m = append(m, v)
- }
- return m
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
- t.leftDelim = left
- t.rightDelim = right
- return t
-}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It panics if a value in the map is not a function with appropriate return
-// type. However, it is legal to overwrite elements of the map. The return
-// value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
- t.init()
- addValueFuncs(t.execFuncs, funcMap)
- addFuncs(t.parseFuncs, funcMap)
- return t
-}
-
-// Lookup returns the template with the given name that is associated with t,
-// or nil if there is no such template.
-func (t *Template) Lookup(name string) *Template {
- if t.common == nil {
- return nil
- }
- return t.tmpl[name]
-}
-
-// Parse parses a string into a template. Nested template definitions will be
-// associated with the top-level template t. Parse may be called multiple times
-// to parse definitions of templates to associate with t. It is an error if a
-// resulting template is non-empty (contains content other than template
-// definitions) and would replace a non-empty template with the same name.
-// (In multiple calls to Parse with the same receiver template, only one call
-// can contain text other than space, comments, and template definitions.)
-func (t *Template) Parse(text string) (*Template, error) {
- t.init()
- trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- // Add the newly parsed trees, including the one for t, into our common structure.
- for name, tree := range trees {
- // If the name we parsed is the name of this template, overwrite this template.
- // The associate method checks it's not a redefinition.
- tmpl := t
- if name != t.name {
- tmpl = t.New(name)
- }
- // Even if t == tmpl, we need to install it in the common.tmpl map.
- if replace, err := t.associate(tmpl, tree); err != nil {
- return nil, err
- } else if replace {
- tmpl.Tree = tree
- }
- tmpl.leftDelim = t.leftDelim
- tmpl.rightDelim = t.rightDelim
- }
- return t, nil
-}
-
-// associate installs the new template into the group of templates associated
-// with t. It is an error to reuse a name except to overwrite an empty
-// template. The two are already known to share the common structure.
-// The boolean return value reports whether to store this tree as t.Tree.
-func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
- if new.common != t.common {
- panic("internal error: associate not common")
- }
- name := new.name
- if old := t.tmpl[name]; old != nil {
- oldIsEmpty := parse.IsEmptyTree(old.Root)
- newIsEmpty := parse.IsEmptyTree(tree.Root)
- if newIsEmpty {
- // Whether old is empty or not, new is empty; no reason to replace old.
- return false, nil
- }
- if !oldIsEmpty {
- return false, fmt.Errorf("template: redefinition of template %q", name)
- }
- }
- t.tmpl[name] = new
- return true, nil
-}
diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING
deleted file mode 100644
index 2993ec08..00000000
--- a/vendor/github.com/alecthomas/units/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014 Alec Thomas
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md
deleted file mode 100644
index bee884e3..00000000
--- a/vendor/github.com/alecthomas/units/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Units - Helpful unit multipliers and functions for Go
-
-The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
-
-It allows for code like this:
-
-```go
-n, err := ParseBase2Bytes("1KB")
-// n == 1024
-n = units.Mebibyte * 512
-```
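(For completeness, a self-contained sketch of the same usage, assuming the package is imported as `units`; the printed values follow from the constants in bytes.go below.)

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// ParseBase2Bytes treats both "1KB" and "1KiB" as base-2, i.e. 1024 bytes.
	n, err := units.ParseBase2Bytes("1KB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(n)) // 1024

	// Base2Bytes implements fmt.Stringer, so this prints "512MiB".
	fmt.Println(units.Mebibyte * 512)
}
```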
diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go
deleted file mode 100644
index eaadeb80..00000000
--- a/vendor/github.com/alecthomas/units/bytes.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package units
-
-// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
-// etc.).
-type Base2Bytes int64
-
-// Base-2 byte units.
-const (
- Kibibyte Base2Bytes = 1024
- KiB = Kibibyte
- Mebibyte = Kibibyte * 1024
- MiB = Mebibyte
- Gibibyte = Mebibyte * 1024
- GiB = Gibibyte
- Tebibyte = Gibibyte * 1024
- TiB = Tebibyte
- Pebibyte = Tebibyte * 1024
- PiB = Pebibyte
- Exbibyte = Pebibyte * 1024
- EiB = Exbibyte
-)
-
-var (
- bytesUnitMap = MakeUnitMap("iB", "B", 1024)
- oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
-)
-
-// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
-// and KiB are both 1024.
-func ParseBase2Bytes(s string) (Base2Bytes, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, oldBytesUnitMap)
- }
- return Base2Bytes(n), err
-}
-
-func (b Base2Bytes) String() string {
- return ToString(int64(b), 1024, "iB", "B")
-}
-
-var (
- metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
-)
-
-// MetricBytes are SI byte units (1000 bytes in a kilobyte).
-type MetricBytes SI
-
-// SI base-10 byte units.
-const (
- Kilobyte MetricBytes = 1000
- KB = Kilobyte
- Megabyte = Kilobyte * 1000
- MB = Megabyte
- Gigabyte = Megabyte * 1000
- GB = Gigabyte
- Terabyte = Gigabyte * 1000
- TB = Terabyte
- Petabyte = Terabyte * 1000
- PB = Petabyte
- Exabyte = Petabyte * 1000
- EB = Exabyte
-)
-
-// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
-func ParseMetricBytes(s string) (MetricBytes, error) {
- n, err := ParseUnit(s, metricBytesUnitMap)
- return MetricBytes(n), err
-}
-
-func (m MetricBytes) String() string {
- return ToString(int64(m), 1000, "B", "B")
-}
-
-// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
-// respectively. That is, KiB represents 1024 and KB represents 1000.
-func ParseStrictBytes(s string) (int64, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, metricBytesUnitMap)
- }
- return int64(n), err
-}
diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go
deleted file mode 100644
index 156ae386..00000000
--- a/vendor/github.com/alecthomas/units/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package units provides helpful unit multipliers and functions for Go.
-//
-// The goal of this package is to have functionality similar to the time [1] package.
-//
-//
-// [1] http://golang.org/pkg/time/
-//
-// It allows for code like this:
-//
-// n, err := ParseBase2Bytes("1KB")
-// // n == 1024
-// n = units.Mebibyte * 512
-package units
diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go
deleted file mode 100644
index 8234a9d5..00000000
--- a/vendor/github.com/alecthomas/units/si.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package units
-
-// SI units.
-type SI int64
-
-// SI unit multiples.
-const (
- Kilo SI = 1000
- Mega = Kilo * 1000
- Giga = Mega * 1000
- Tera = Giga * 1000
- Peta = Tera * 1000
- Exa = Peta * 1000
-)
-
-func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
- return map[string]float64{
- shortSuffix: 1,
- "K" + suffix: float64(scale),
- "M" + suffix: float64(scale * scale),
- "G" + suffix: float64(scale * scale * scale),
- "T" + suffix: float64(scale * scale * scale * scale),
- "P" + suffix: float64(scale * scale * scale * scale * scale),
- "E" + suffix: float64(scale * scale * scale * scale * scale * scale),
- }
-}
diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go
deleted file mode 100644
index 6527e92d..00000000
--- a/vendor/github.com/alecthomas/units/util.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package units
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
-)
-
-func ToString(n int64, scale int64, suffix, baseSuffix string) string {
- mn := len(siUnits)
- out := make([]string, mn)
- for i, m := range siUnits {
- if n%scale != 0 || i == 0 && n == 0 {
- s := suffix
- if i == 0 {
- s = baseSuffix
- }
- out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
- }
- n /= scale
- if n == 0 {
- break
- }
- }
- return strings.Join(out, "")
-}
-
-// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
-var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
-
-// leadingInt consumes the leading [0-9]* from s.
-func leadingInt(s string) (x int64, rem string, err error) {
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- break
- }
- if x >= (1<<63-10)/10 {
- // overflow
- return 0, "", errLeadingInt
- }
- x = x*10 + int64(c) - '0'
- }
- return x, s[i:], nil
-}
-
-func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
- // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
- orig := s
- f := float64(0)
- neg := false
-
- // Consume [-+]?
- if s != "" {
- c := s[0]
- if c == '-' || c == '+' {
- neg = c == '-'
- s = s[1:]
- }
- }
- // Special case: if all that is left is "0", this is zero.
- if s == "0" {
- return 0, nil
- }
- if s == "" {
- return 0, errors.New("units: invalid " + orig)
- }
- for s != "" {
- g := float64(0) // this element of the sequence
-
- var x int64
- var err error
-
- // The next character must be [0-9.]
- if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
- return 0, errors.New("units: invalid " + orig)
- }
- // Consume [0-9]*
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- g = float64(x)
- pre := pl != len(s) // whether we consumed anything before a period
-
- // Consume (\.[0-9]*)?
- post := false
- if s != "" && s[0] == '.' {
- s = s[1:]
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- scale := 1.0
- for n := pl - len(s); n > 0; n-- {
- scale *= 10
- }
- g += float64(x) / scale
- post = pl != len(s)
- }
- if !pre && !post {
- // no digits (e.g. ".s" or "-.s")
- return 0, errors.New("units: invalid " + orig)
- }
-
- // Consume unit.
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c == '.' || ('0' <= c && c <= '9') {
- break
- }
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, errors.New("units: unknown unit " + u + " in " + orig)
- }
-
- f += g * unit
- }
-
- if neg {
- f = -f
- }
- if f < float64(-1<<63) || f > float64(1<<63-1) {
- return 0, errors.New("units: overflow parsing unit")
- }
- return int64(f), nil
-}
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287d..00000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
… (2,388 lines of numeric sample data elided)
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
index f4cabd66..d7d14f8e 100644
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
} else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
return newStream(ƒ)
}
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
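(For reference, a short sketch of the API this hunk optimizes; the import path matches the vendored package and the targets are illustrative.)

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// quantile -> allowed error. NewTargeted now copies this map into a
	// slice once, so the invariant ƒ no longer ranges over a map per sample.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})
	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i))
	}
	fmt.Println(s.Query(0.99)) // roughly 9900, within the configured error bound
}
```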
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 692c186f..73ff68fb 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -12,35 +12,63 @@
# Individual Persons
Aaron Hopkins
+Achille Roussel
+Alexey Palazhchenko
+Andrew Reid
Arne Hormann
+Asta Xie
+Bulat Gaifullin
Carlos Nieto
Chris Moos
+Craig Wilson
+Daniel Montoya
Daniel Nichter
Daniël van Eeden
+Dave Protasowski
DisposaBoy
+Egor Smolyakov
+Evan Shaw
Frederick Mayle
Gustavo Kristic
+Hajime Nakagami
Hanno Braun
Henri Yandell
Hirotaka Yamamoto
+ICHINOSE Shogo
INADA Naoki
+Jacek Szwec
James Harr
+Jeff Hodges
+Jeffrey Charles
Jian Zhen
Joshua Prunier
Julien Lefevre
Julien Schmidt
+Justin Li
+Justin Nuß
Kamil Dziedzic
Kevin Malachowski
+Kieron Woodhouse
Lennart Rudolph
Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
Luca Looz
Lucas Liu
Luke Scott
+Maciej Zimnoch
Michael Woolnough
Nicola Peduzzi
Olivier Mengué
+oscarzhao
Paul Bonser
+Peter Schultz
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
Runrioter Wung
+Shuode Li
Soroush Pour
Stan Putrya
Stanley Gunawan
@@ -52,5 +80,10 @@ Zhenye Xie
# Organizations
Barracuda Networks, Inc.
+Counting Ltd.
Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Percona LLC
+Pivotal Inc.
Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
deleted file mode 100644
index 6bcad7ea..00000000
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ /dev/null
@@ -1,119 +0,0 @@
-## Version 1.3 (2016-12-01)
-
-Changes:
-
- - Go 1.1 is no longer supported
- - Use decimals fields in MySQL to format time types (#249)
- - Buffer optimizations (#269)
- - TLS ServerName defaults to the host (#283)
- - Refactoring (#400, #410, #437)
- - Adjusted documentation for second generation CloudSQL (#485)
- - Documented DSN system var quoting rules (#502)
- - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
-
-New Features:
-
- - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- - Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- - Support for uint64 parameters with high bit set (#332, #345)
- - Cleartext authentication plugin support (#327)
- - Exported ParseDSN function and the Config struct (#403, #419, #429)
- - Read / Write timeouts (#401)
- - Support for JSON field type (#414)
- - Support for multi-statements and multi-results (#411, #431)
- - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
- - Native password authentication plugin support (#494, #524)
-
-Bugfixes:
-
- - Fixed handling of queries without columns and rows (#255)
- - Fixed a panic when SetKeepAlive() failed (#298)
- - Handle ERR packets while reading rows (#321)
- - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- - Actually zero out bytes in handshake response (#378)
- - Fixed race condition in registering LOAD DATA INFILE handler (#383)
- - Fixed tests with MySQL 5.7.9+ (#380)
- - QueryUnescape TLS config names (#397)
- - Fixed "broken pipe" error by writing to closed socket (#390)
- - Fixed LOAD LOCAL DATA INFILE buffering (#424)
- - Fixed parsing of floats into float64 when placeholders are used (#434)
- - Fixed DSN tests with Go 1.7+ (#459)
- - Handle ERR packets while waiting for EOF (#473)
- - Invalidate connection on error while discarding additional results (#513)
- - Allow terminating packets of length 0 (#516)
-
-
-## Version 1.2 (2014-06-03)
-
-Changes:
-
- - We switched back to a "rolling release". `go get` installs the current master branch again
- - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- - Exported errors to allow easy checking from application code
- - Enabled TCP Keepalives on TCP connections
- - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- - The DSN parser also checks for a missing separating slash
- - Faster binary date / datetime to string formatting
- - Also exported the MySQLWarning type
- - mysqlConn.Close returns the first error encountered instead of ignoring all errors
- - writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
-
-New Features:
-
- - `RegisterDial` allows the usage of a custom dial function to establish the network connection
- - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- - Logging of critical errors is configurable with `SetLogger`
- - Google CloudSQL support
-
-Bugfixes:
-
- - Allow more than 32 parameters in prepared statements
- - Various old_password fixes
- - Fixed TestConcurrent test to pass Go's race detection
- - Fixed appendLengthEncodedInteger for large numbers
- - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
-
-
-## Version 1.1 (2013-11-02)
-
-Changes:
-
- - Go-MySQL-Driver now requires Go 1.1
- - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- - Optimized the buffer for reading
- - stmt.Query now caches column metadata
- - New Logo
- - Changed the copyright header to include all contributors
- - Improved the LOAD INFILE documentation
- - The driver struct is now exported to make the driver directly accessible
- - Refactored the driver tests
- - Added more benchmarks and moved all to a separate file
- - Other small refactoring
-
-New Features:
-
- - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
-
-Bugfixes:
-
- - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- - Convert to DB timezone when inserting `time.Time`
- - Split packets (more than 16MB) are now merged correctly
- - Fixed false positive `io.EOF` errors when the data was fully read
- - Avoid panics on reuse of closed connections
- - Fixed empty string producing false nil values
- - Fixed sign byte for positive TIME fields
-
-
-## Version 1.0 (2013-05-14)
-
-Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
deleted file mode 100644
index 8fe16bcb..00000000
--- a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Contributing Guidelines
-
-## Reporting Issues
-
-Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
-
-## Contributing Code
-
-By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
-Don't forget to add yourself to the AUTHORS file.
-
-### Code Review
-
-Everyone is invited to review and comment on pull requests.
-If it looks fine to you, comment with "LGTM" (Looks good to me).
-
-If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.
-
-Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
-
-## Development Ideas
-
-If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
deleted file mode 100644
index a16012f8..00000000
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ /dev/null
@@ -1,443 +0,0 @@
-# Go-MySQL-Driver
-
-A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
-
-![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-
----------------------------------------
- * [Features](#features)
- * [Requirements](#requirements)
- * [Installation](#installation)
- * [Usage](#usage)
- * [DSN (Data Source Name)](#dsn-data-source-name)
- * [Password](#password)
- * [Protocol](#protocol)
- * [Address](#address)
- * [Parameters](#parameters)
- * [Examples](#examples)
- * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
- * [time.Time support](#timetime-support)
- * [Unicode support](#unicode-support)
- * [Testing / Development](#testing--development)
- * [License](#license)
-
----------------------------------------
-
-## Features
- * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
- * Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
- * Automatic handling of broken connections
- * Automatic Connection Pooling *(by database/sql package)*
- * Supports queries larger than 16MB
- * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
- * Intelligent `LONG DATA` handling in prepared statements
- * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
- * Optional `time.Time` parsing
- * Optional placeholder interpolation
-
-## Requirements
- * Go 1.2 or higher
- * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
-
----------------------------------------
-
-## Installation
-Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from your shell:
-```bash
-$ go get github.com/go-sql-driver/mysql
-```
-Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
-
-## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
-
-Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
-```go
-import "database/sql"
-import _ "github.com/go-sql-driver/mysql"
-
-db, err := sql.Open("mysql", "user:password@/dbname")
-```
-
-[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
-
-
-### DSN (Data Source Name)
-
-The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without a type prefix (optional parts marked by square brackets):
-```
-[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
-```
-
-A DSN in its fullest form:
-```
-username:password@protocol(address)/dbname?param=value
-```
-
-Except for the database name, all values are optional. So the minimal DSN is:
-```
-/dbname
-```
-
-If you do not want to preselect a database, leave `dbname` empty:
-```
-/
-```
-This has the same effect as an empty DSN string:
-```
-
-```
-
-Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
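(A minimal sketch of the struct-based approach; the field values are placeholders and error handling is abbreviated.)

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.Config{
		User:   "user", // placeholder credentials
		Passwd: "password",
		Net:    "tcp",
		Addr:   "127.0.0.1:3306",
		DBName: "dbname",
	}
	// FormatDSN yields "user:password@tcp(127.0.0.1:3306)/dbname".
	db, err := sql.Open("mysql", cfg.FormatDSN())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```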
-
-#### Password
-Passwords can consist of any character. Escaping is **not** necessary.
-
-#### Protocol
-See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use a Unix domain socket if available and TCP otherwise for best performance.
-
-#### Address
-For TCP and UDP networks, addresses have the form `host:port`.
-If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
-
-For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
-
-#### Parameters
-*Parameters are case-sensitive!*
-
-Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
-
-##### `allowAllFiles`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
-
-##### `allowCleartextPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
-
-##### `allowNativePasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-`allowNativePasswords=true` allows the usage of the mysql native password method.
-
-##### `allowOldPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
-
-##### `charset`
-
-```
-Type: string
-Valid Values:
-Default: none
-```
-
-Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the next charset is tried if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
-
-##### `collation`
-
-```
-Type: string
-Valid Values:
-Default: utf8_general_ci
-```
-
-Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
-
-A list of valid collations for a server is retrievable with `SHOW COLLATION`.
-
-##### `clientFoundRows`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
-
-##### `columnsWithAlias`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
-
-```
-SELECT u.id FROM users as u
-```
-
-will return `u.id` instead of just `id` if `columnsWithAlias=true`.
-
-##### `interpolateParams`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of round trips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters and close the statement again.
-
-*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
-
-##### `loc`
-
-```
-Type: string
-Valid Values:
-Default: UTC
-```
-
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
-
-Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-
-Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
-
-##### `maxAllowedPacket`
-```
-Type: decimal number
-Default: 0
-```
-
-Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
-
-##### `multiStatements`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
-
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
-
-##### `parseTime`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
-
-
-##### `readTimeout`
-
-```
-Type: decimal number
-Default: 0
-```
-
-I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `strict`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
-
-A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
-
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
-
-##### `timeout`
-
-```
-Type: decimal number
-Default: OS default
-```
-
-*Driver*-side connection timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server-side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
-
-##### `tls`
-
-```
-Type: bool / string
-Valid Values: true, false, skip-verify, <name>
-Default: false
-```
-
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
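(A hedged sketch of registering a custom TLS config; the registry key `"custom"` and the CA certificate path are placeholders.)

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	rootCertPool := x509.NewCertPool()
	pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		log.Fatal("failed to append CA PEM")
	}
	// The key "custom" is then referenced via tls=custom in the DSN.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```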
-
-##### `writeTimeout`
-
-```
-Type: decimal number
-Default: 0
-```
-
-I/O write timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-
-##### System Variables
-
-Any other parameters are interpreted as system variables:
- * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
- * `<enum_var>=<value>`: `SET <enum_var>=<value>`
- * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
-
-Rules:
-* The values for string variables must be quoted with '
-* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`)
-
-Examples:
- * `autocommit=1`: `SET autocommit=1`
- * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
- * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
-
-
-#### Examples
-```
-user@unix(/path/to/socket)/dbname
-```
-
-```
-root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
-```
-
-```
-user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
-```
-
-Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
-```
-user:password@/dbname?sql_mode=TRADITIONAL
-```
-
-TCP via IPv6:
-```
-user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
-```
-
-TCP on a remote host, e.g. Amazon RDS:
-```
-id:password@tcp(your-amazonaws-uri.com:3306)/dbname
-```
-
-Google Cloud SQL on App Engine (First Generation MySQL Server):
-```
-user@cloudsql(project-id:instance-name)/dbname
-```
-
-Google Cloud SQL on App Engine (Second Generation MySQL Server):
-```
-user@cloudsql(project-id:regionname:instance-name)/dbname
-```
-
-TCP using default port (3306) on localhost:
-```
-user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
-```
-
-Use the default protocol (tcp) and host (localhost:3306):
-```
-user:password@/dbname
-```
-
-No Database preselected:
-```
-user:password@/
-```
-
-### `LOAD DATA LOCAL INFILE` support
-For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
-```go
-import "github.com/go-sql-driver/mysql"
-```
-
-Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
-
-To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
-
-See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
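(A minimal sketch of the whitelist approach; the file path, DSN and table name are placeholders.)

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// The file must be registered before the statement references it.
	mysql.RegisterLocalFile("/tmp/data.csv") // placeholder path
	defer mysql.DeregisterLocalFile("/tmp/data.csv")

	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/data.csv' INTO TABLE mytable"); err != nil {
		log.Fatal(err)
	}
}
```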
-
-
-### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
-
-**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-
-Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
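(A short sketch of both scan destinations under `parseTime=true`; the DSN, table and column names are placeholders.)

```go
package main

import (
	"database/sql"
	"log"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var created time.Time      // DATETIME scans directly into time.Time
	var deleted mysql.NullTime // NullTime additionally tolerates NULL
	err = db.QueryRow("SELECT created_at, deleted_at FROM items WHERE id = ?", 1).
		Scan(&created, &deleted)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(created, deleted.Valid)
}
```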
-
-
-### Unicode support
-Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
-
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
-
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
-
-See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-
-
-## Testing / Development
-To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
-
-Go-MySQL-Driver is not feature-complete yet. Your help is very much appreciated.
-If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-
-See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
-
----------------------------------------
-
-## License
-Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-
-Mozilla summarizes the license scope as follows:
-> MPL: The copyleft applies to any files containing MPLed code.
-
-
-That means:
- * You can **use** the **unchanged** source code both in private and commercially
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
-
-Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
-
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-
-![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
-
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
index 565614ee..be41f2ee 100644
--- a/vendor/github.com/go-sql-driver/mysql/appengine.go
+++ b/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -11,7 +11,7 @@
package mysql
import (
- "appengine/cloudsql"
+ "google.golang.org/appengine/cloudsql"
)
func init() {
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 00000000..0b59f52e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,420 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used by adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc, false)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, bool, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, (authResp == nil), nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, false, ErrOldPassword
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := scrambleOldPassword(authData[:8], mc.cfg.Passwd)
+ return authResp, true, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, false, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return []byte(mc.cfg.Passwd), true, nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, false, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs and will need a 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, false, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, true, nil
+ }
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ return []byte(mc.cfg.Passwd), true, nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, false, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, false, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, false, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, addNUL, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp, addNUL); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket([]byte(mc.cfg.Passwd), true)
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ data[4] = cachingSha2PasswordRequestPublicKey
+ mc.writePacket(data)
+
+ // parse public key
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ block, _ := pem.Decode(data[1:])
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
index 2001feac..eb4748bf 100644
--- a/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -130,18 +130,18 @@ func (b *buffer) takeBuffer(length int) []byte {
// smaller than defaultBufSize
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) []byte {
- if b.length == 0 {
- return b.buf[:length]
+ if b.length > 0 {
+ return nil
}
- return nil
+ return b.buf[:length]
}
// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() []byte {
- if b.length == 0 {
- return b.buf
+ if b.length > 0 {
+ return nil
}
- return nil
+ return b.buf
}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
index 82079cfb..136c9e4d 100644
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -9,6 +9,7 @@
package mysql
const defaultCollation = "utf8_general_ci"
+const binaryCollation = "binary"
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index d82c728f..e5706141 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -10,12 +10,23 @@ package mysql
import (
"database/sql/driver"
+ "io"
"net"
"strconv"
"strings"
"time"
)
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
type mysqlConn struct {
buf buffer
netConn net.Conn
@@ -29,7 +40,14 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- strict bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
}
// Handles parameters set in DSN after the connection is established
@@ -62,22 +80,41 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
func (mc *mysqlConn) Begin() (driver.Tx, error) {
- if mc.netConn == nil {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- err := mc.exec("START TRANSACTION")
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
if err == nil {
return &mysqlTx{mc}, err
}
-
- return nil, err
+ return nil, mc.markBadConn(err)
}
func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
- if mc.netConn != nil {
+ if !mc.closed.IsSet() {
err = mc.writeCommandPacket(comQuit)
}
@@ -91,26 +128,39 @@ func (mc *mysqlConn) Close() (err error) {
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
// Makes cleanup idempotent
- if mc.netConn != nil {
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
}
- mc.netConn = nil
+ return ErrInvalidConn
}
- mc.cfg = nil
- mc.buf.nc = nil
+ return nil
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
- return nil, err
+ return nil, mc.markBadConn(err)
}
stmt := &mysqlStmt{
@@ -144,7 +194,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return "", driver.ErrBadConn
+ return "", ErrInvalidConn
}
buf = buf[:0]
argPos := 0
@@ -257,7 +307,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
}
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -271,7 +321,6 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
return nil, err
}
query = prepared
- args = nil
}
mc.affectedRows = 0
mc.insertId = 0
@@ -283,32 +332,43 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
insertId: int64(mc.insertId),
}, err
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err != nil {
- return err
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil && resLen > 0 {
- if err = mc.readUntilEOF(); err != nil {
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
return err
}
- err = mc.readUntilEOF()
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
}
- return err
+ return mc.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- if mc.netConn == nil {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -322,7 +382,6 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
return nil, err
}
query = prepared
- args = nil
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
@@ -335,15 +394,22 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
rows.mc = mc
if resLen == 0 {
- // no columns, no more data
- return emptyRows{}, nil
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
}
+
// Columns
- rows.columns, err = mc.readColumns(resLen)
+ rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Gets the value of the given MySQL System Variable
@@ -359,7 +425,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
if err == nil {
rows := new(textRows)
rows.mc = mc
- rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
if resLen > 0 {
// Columns
@@ -375,3 +441,21 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
}
return nil, err
}
+
+// finish is called when the query has canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 00000000..62796bfc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,208 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // Reach here if canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ mc.watching = true
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watcher <- ctx
+
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ return driver.ErrBadConn
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index 88cfff3f..b1e6b85e 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -9,7 +9,9 @@
package mysql
const (
- minProtocolVersion byte = 10
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
@@ -18,10 +20,11 @@ const (
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
const (
- iOK byte = 0x00
- iLocalInFile byte = 0xfb
- iEOF byte = 0xfe
- iERR byte = 0xff
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
)
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
@@ -87,8 +90,10 @@ const (
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
const (
- fieldTypeDecimal byte = iota
+ fieldTypeDecimal fieldType = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
@@ -107,7 +112,7 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON byte = iota + 0xf5
+ fieldTypeJSON fieldType = iota + 0xf5
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
@@ -161,3 +166,9 @@ const (
statusInTransReadonly
statusSessionStateChanged
)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index 0022d1f1..1a75a16e 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// Package mysql provides a MySQL driver for Go's database/sql package
+// Package mysql provides a MySQL driver for Go's database/sql package.
//
// The driver should be used via the database/sql package:
//
@@ -20,8 +20,14 @@ import (
"database/sql"
"database/sql/driver"
"net"
+ "sync"
)
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -30,12 +36,17 @@ type MySQLDriver struct{}
// Custom dial functions must be registered with RegisterDial
type DialFunc func(addr string) (net.Conn, error)
-var dials map[string]DialFunc
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialFunc
+)
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
func RegisterDial(net string, dial DialFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
if dials == nil {
dials = make(map[string]DialFunc)
}
@@ -52,16 +63,19 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
}
mc.cfg, err = ParseDSN(dsn)
if err != nil {
return nil, err
}
mc.parseTime = mc.cfg.ParseTime
- mc.strict = mc.cfg.Strict
// Connect to Server
- if dial, ok := dials[mc.cfg.Net]; ok {
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
mc.netConn, err = dial(mc.cfg.Addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
@@ -81,6 +95,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
@@ -88,20 +107,31 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc.writeTimeout = mc.cfg.WriteTimeout
// Reading Handshake Initialization Packet
- cipher, err := mc.readInitPacket()
+ authData, plugin, err := mc.readHandshakePacket()
if err != nil {
mc.cleanup()
return nil, err
}
// Send Client Authentication Packet
- if err = mc.writeAuthPacket(cipher); err != nil {
+ authResp, addNUL, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin, if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, addNUL, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, addNUL, plugin); err != nil {
mc.cleanup()
return nil, err
}
// Handle response to auth packet, switch methods if possible
- if err = handleAuthResult(mc, cipher); err != nil {
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
@@ -134,50 +164,6 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
-func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
- // Read Result Packet
- cipher, err := mc.readResultOK()
- if err == nil {
- return nil // auth successful
- }
-
- if mc.cfg == nil {
- return err // auth failed and retry not possible
- }
-
- // Retry auth if configured to do so.
- if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
- // Retry with old authentication method. Note: there are edge cases
- // where this should work but doesn't; this is currently "wontfix":
- // https://github.com/go-sql-driver/mysql/issues/184
-
- // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
- // sent and we have to keep using the cipher sent in the init packet.
- if cipher == nil {
- cipher = oldCipher
- }
-
- if err = mc.writeOldAuthPacket(cipher); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
- // Retry with clear text password for
- // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
- // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
- if err = mc.writeClearAuthPacket(); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
- if err = mc.writeNativeAuthPacket(cipher); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- }
- return err
-}
-
func init() {
sql.Register("mysql", &MySQLDriver{})
}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index ac00dced..be014bab 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -10,11 +10,13 @@ package mysql
import (
"bytes"
+ "crypto/rsa"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
+ "sort"
"strconv"
"strings"
"time"
@@ -27,7 +29,9 @@ var (
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
-// Config is a configuration parsed from a DSN string
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
type Config struct {
User string // Username
Passwd string // Password (requires User)
@@ -38,6 +42,8 @@ type Config struct {
Collation string // Connection collation
Loc *time.Location // Location for time.Time values
MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
TLSConfig string // TLS configuration name
tls *tls.Config // TLS configuration
Timeout time.Duration // Dial timeout
@@ -53,7 +59,54 @@ type Config struct {
InterpolateParams bool // Interpolate placeholders into query string
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
- Strict bool // Return warnings as errors
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.tls != nil {
+ if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+ }
+
+ return nil
}
// FormatDSN formats the given Config into a DSN string which can be passed to
@@ -102,12 +155,12 @@ func (cfg *Config) FormatDSN() string {
}
}
- if cfg.AllowNativePasswords {
+ if !cfg.AllowNativePasswords {
if hasParam {
- buf.WriteString("&allowNativePasswords=true")
+ buf.WriteString("&allowNativePasswords=false")
} else {
hasParam = true
- buf.WriteString("?allowNativePasswords=true")
+ buf.WriteString("?allowNativePasswords=false")
}
}
@@ -195,15 +248,25 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.ReadTimeout.String())
}
- if cfg.Strict {
+ if cfg.RejectReadOnly {
if hasParam {
- buf.WriteString("&strict=true")
+ buf.WriteString("&rejectReadOnly=true")
} else {
hasParam = true
- buf.WriteString("?strict=true")
+ buf.WriteString("?rejectReadOnly=true")
}
}
+ if len(cfg.ServerPubKey) > 0 {
+ if hasParam {
+ buf.WriteString("&serverPubKey=")
+ } else {
+ hasParam = true
+ buf.WriteString("?serverPubKey=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
+ }
+
if cfg.Timeout > 0 {
if hasParam {
buf.WriteString("&timeout=")
@@ -234,7 +297,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.WriteTimeout.String())
}
- if cfg.MaxAllowedPacket > 0 {
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
@@ -247,7 +310,12 @@ func (cfg *Config) FormatDSN() string {
// other params
if cfg.Params != nil {
- for param, value := range cfg.Params {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
if hasParam {
buf.WriteByte('&')
} else {
@@ -257,7 +325,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(param)
buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(value))
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
}
}
@@ -267,10 +335,7 @@ func (cfg *Config) FormatDSN() string {
// ParseDSN parses the DSN string to a Config
func ParseDSN(dsn string) (cfg *Config, err error) {
// New config with some default values
- cfg = &Config{
- Loc: time.UTC,
- Collation: defaultCollation,
- }
+ cfg = NewConfig()
// [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
@@ -338,28 +403,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
return nil, errInvalidDSNNoSlash
}
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
- return nil, errInvalidDSNUnsafeCollation
- }
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
+ if err = cfg.normalize(); err != nil {
+ return nil, err
}
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
-
- }
-
return
}
@@ -374,7 +420,6 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// cfg params
switch value := param[1]; param[0] {
-
// Disable INFILE whitelist / enable all files
case "allowAllFiles":
var isBool bool
@@ -472,14 +517,32 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
- // Strict mode
- case "strict":
+ // Reject read-only connections
+ case "rejectReadOnly":
var isBool bool
- cfg.Strict, isBool = readBool(value)
+ cfg.RejectReadOnly, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+
+ if pubKey := getServerPubKey(name); pubKey != nil {
+ cfg.ServerPubKey = name
+ cfg.pubKey = pubKey
+ } else {
+ return errors.New("invalid value / unknown server pub key name: " + name)
+ }
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
// Dial Timeout
case "timeout":
cfg.Timeout, err = time.ParseDuration(value)
@@ -506,14 +569,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return fmt.Errorf("invalid value for TLS config name: %v", err)
}
- if tlsConfig, ok := tlsConfigRegister[name]; ok {
- if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
- host, _, err := net.SplitHostPort(cfg.Addr)
- if err == nil {
- tlsConfig.ServerName = host
- }
- }
-
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
cfg.TLSConfig = name
cfg.tls = tlsConfig
} else {
@@ -546,3 +602,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 857854e1..760782ff 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -9,10 +9,8 @@
package mysql
import (
- "database/sql/driver"
"errors"
"fmt"
- "io"
"log"
"os"
)
@@ -31,6 +29,12 @@ var (
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
)
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
@@ -59,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
-
-// MySQLWarnings is an error type which represents a group of one or more MySQL
-// warnings
-type MySQLWarnings []MySQLWarning
-
-func (mws MySQLWarnings) Error() string {
- var msg string
- for i, warning := range mws {
- if i > 0 {
- msg += "\r\n"
- }
- msg += fmt.Sprintf(
- "%s %s: %s",
- warning.Level,
- warning.Code,
- warning.Message,
- )
- }
- return msg
-}
-
-// MySQLWarning is an error type which represents a single MySQL warning.
-// Warnings are returned in groups only. See MySQLWarnings
-type MySQLWarning struct {
- Level string
- Code string
- Message string
-}
-
-func (mc *mysqlConn) getWarnings() (err error) {
- rows, err := mc.Query("SHOW WARNINGS", nil)
- if err != nil {
- return
- }
-
- var warnings = MySQLWarnings{}
- var values = make([]driver.Value, 3)
-
- for {
- err = rows.Next(values)
- switch err {
- case nil:
- warning := MySQLWarning{}
-
- if raw, ok := values[0].([]byte); ok {
- warning.Level = string(raw)
- } else {
- warning.Level = fmt.Sprintf("%s", values[0])
- }
- if raw, ok := values[1].([]byte); ok {
- warning.Code = string(raw)
- } else {
- warning.Code = fmt.Sprintf("%s", values[1])
- }
- if raw, ok := values[2].([]byte); ok {
- warning.Message = string(raw)
- } else {
- warning.Message = fmt.Sprintf("%s", values[0])
- }
-
- warnings = append(warnings, warning)
-
- case io.EOF:
- return warnings
-
- default:
- rows.Close()
- return
- }
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 00000000..e1e2ece4
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless if the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 547357cf..273cb0ba 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -147,7 +147,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
- if err == nil {
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
data := make([]byte, 4+packetSize)
var n int
for err == nil {
@@ -173,8 +174,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
- _, err = mc.readResultOK()
- return err
+ return mc.readResultOK()
}
mc.readPacket()
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index aafe9793..d873a97b 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -30,9 +30,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet header
data, err := mc.buf.readNext(4)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// packet length [24 bit]
@@ -54,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if prevData == nil {
errLog.Print(ErrMalformPkt)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
return prevData, nil
@@ -63,9 +66,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// return data if this was the last packet
@@ -125,33 +131,47 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
+ mc.cleanup()
errLog.Print(ErrMalformPkt)
} else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
errLog.Print(err)
}
- return driver.ErrBadConn
+ return ErrInvalidConn
}
}
/******************************************************************************
-* Initialisation Process *
+* Initialization Process *
******************************************************************************/
// Handshake Initialization Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
-func (mc *mysqlConn) readInitPacket() ([]byte, error) {
+func (mc *mysqlConn) readHandshakePacket() ([]byte, string, error) {
data, err := mc.readPacket()
if err != nil {
- return nil, err
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return nil, "", err
}
if data[0] == iERR {
- return nil, mc.handleErrorPacket(data)
+ return nil, "", mc.handleErrorPacket(data)
}
// protocol version [1 byte]
if data[0] < minProtocolVersion {
- return nil, fmt.Errorf(
+ return nil, "", fmt.Errorf(
"unsupported protocol version %d. Version %d or higher is required",
data[0],
minProtocolVersion,
@@ -163,7 +183,7 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
// first part of the password cipher [8 bytes]
- cipher := data[pos : pos+8]
+ authData := data[pos : pos+8]
// (filler) always 0x00 [1 byte]
pos += 8 + 1
@@ -171,13 +191,14 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
// capability flags (lower 2 bytes) [2 bytes]
mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
if mc.flags&clientProtocol41 == 0 {
- return nil, ErrOldProtocol
+ return nil, "", ErrOldProtocol
}
if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
- return nil, ErrNoTLS
+ return nil, "", ErrNoTLS
}
pos += 2
+ plugin := ""
if len(data) > pos {
// character set [1 byte]
// status flags [2 bytes]
@@ -198,32 +219,34 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
//
// The official Python library uses the fixed length 12
// which seems to work but technically could have a hidden bug.
- cipher = append(cipher, data[pos:pos+12]...)
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
- // TODO: Verify string termination
// EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
// \NUL otherwise
- //
- //if data[len(data)-1] == 0 {
- // return
- //}
- //return ErrMalformPkt
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
// make a memory safe copy of the cipher slice
var b [20]byte
- copy(b[:], cipher)
- return b[:], nil
+ copy(b[:], authData)
+ return b[:], plugin, nil
}
+ plugin = defaultAuthPlugin
+
// make a memory safe copy of the cipher slice
var b [8]byte
- copy(b[:], cipher)
- return b[:], nil
+ copy(b[:], authData)
+ return b[:], plugin, nil
}
// Client Authentication Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
-func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, addNUL bool, plugin string) error {
// Adjust client flags based on server support
clientFlags := clientProtocol41 |
clientSecureConn |
@@ -247,10 +270,19 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
clientFlags |= clientMultiStatements
}
- // User Password
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(len(authResp)))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
- pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+ if addNUL {
+ pktLen++
+ }
// To specify a db name
if n := len(mc.cfg.DBName); n > 0 {
@@ -261,9 +293,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
// Calculate packet length and get buffer with that size
data := mc.buf.takeSmallBuffer(pktLen + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// ClientFlags [32 bit]
@@ -318,9 +350,13 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
data[pos] = 0x00
pos++
- // ScrambleBuffer [length encoded integer]
- data[pos] = byte(len(scrambleBuff))
- pos += 1 + copy(data[pos+1:], scrambleBuff)
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+ if addNUL {
+ data[pos] = 0x00
+ pos++
+ }
// Databasename [null terminated string]
if len(mc.cfg.DBName) > 0 {
@@ -329,72 +365,32 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
pos++
}
- // Assume native client during response
- pos += copy(data[pos:], "mysql_native_password")
+ pos += copy(data[pos:], plugin)
data[pos] = 0x00
// Send Auth packet
return mc.writePacket(data)
}
-// Client old authentication packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
- // User password
- scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
-
- // Calculate the packet length and add a tailing 0
- pktLen := len(scrambleBuff) + 1
- data := mc.buf.takeSmallBuffer(4 + pktLen)
- if data == nil {
- // can not take the buffer. Something must be wrong with the connection
- errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte, addNUL bool) error {
+ pktLen := 4 + len(authData)
+ if addNUL {
+ pktLen++
}
-
- // Add the scrambled password [null terminated string]
- copy(data[4:], scrambleBuff)
- data[4+pktLen-1] = 0x00
-
- return mc.writePacket(data)
-}
-
-// Client clear text authentication packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeClearAuthPacket() error {
- // Calculate the packet length and add a tailing 0
- pktLen := len(mc.cfg.Passwd) + 1
- data := mc.buf.takeSmallBuffer(4 + pktLen)
+ data := mc.buf.takeSmallBuffer(pktLen)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
- // Add the clear password [null terminated string]
- copy(data[4:], mc.cfg.Passwd)
- data[4+pktLen-1] = 0x00
-
- return mc.writePacket(data)
-}
-
-// Native password authentication method
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
-
- // Calculate the packet length and add a tailing 0
- pktLen := len(scrambleBuff)
- data := mc.buf.takeSmallBuffer(4 + pktLen)
- if data == nil {
- // can not take the buffer. Something must be wrong with the connection
- errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ if addNUL {
+ data[pktLen-1] = 0x00
}
- // Add the scramble
- copy(data[4:], scrambleBuff)
-
return mc.writePacket(data)
}
@@ -408,9 +404,9 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
data := mc.buf.takeSmallBuffer(4 + 1)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -427,9 +423,9 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
pktLen := 1 + len(arg)
data := mc.buf.takeBuffer(pktLen + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -448,9 +444,9 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
data := mc.buf.takeSmallBuffer(4 + 1 + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -470,44 +466,50 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
* Result Packets *
******************************************************************************/
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() ([]byte, error) {
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
data, err := mc.readPacket()
- if err == nil {
- // packet indicator
- switch data[0] {
+ if err != nil {
+ return nil, "", err
+ }
- case iOK:
- return nil, mc.handleOkPacket(data)
+ // packet indicator
+ switch data[0] {
- case iEOF:
- if len(data) > 1 {
- pluginEndIndex := bytes.IndexByte(data, 0x00)
- plugin := string(data[1:pluginEndIndex])
- cipher := data[pluginEndIndex+1 : len(data)-1]
-
- if plugin == "mysql_old_password" {
- // using old_passwords
- return cipher, ErrOldPassword
- } else if plugin == "mysql_clear_password" {
- // using clear text password
- return cipher, ErrCleartextPassword
- } else if plugin == "mysql_native_password" {
- // using mysql default authentication method
- return cipher, ErrNativePassword
- } else {
- return cipher, ErrUnknownPlugin
- }
- } else {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, ErrOldPassword
- }
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
- default: // Error otherwise
- return nil, mc.handleErrorPacket(data)
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) < 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
}
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns error if Packet is not an 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
}
- return nil, err
+ return mc.handleErrorPacket(data)
}
// Result Set Header Packet
@@ -550,6 +552,22 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// Error Number [16 bit uint]
errno := binary.LittleEndian.Uint16(data[1:3])
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection hoping this one would have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
pos := 3
// SQL State [optional: # + 5bytes string]
@@ -584,19 +602,12 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if err := mc.discardResults(); err != nil {
- return err
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
}
// warning count [2 bytes]
- if !mc.strict {
- return nil
- }
- pos := 1 + n + m + 2
- if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
- return mc.getWarnings()
- }
return nil
}
@@ -668,14 +679,21 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
if err != nil {
return nil, err
}
+ pos += n
// Filler [uint8]
+ pos++
+
// Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
// Length [uint32]
- pos += n + 1 + 2 + 4
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
// Field type [uint8]
- columns[i].fieldType = data[pos]
+ columns[i].fieldType = fieldType(data[pos])
pos++
// Flags [uint16]
@@ -698,6 +716,10 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
func (rows *textRows) readRow(dest []driver.Value) error {
mc := rows.mc
+ if rows.rs.done {
+ return io.EOF
+ }
+
data, err := mc.readPacket()
if err != nil {
return err
@@ -707,15 +729,11 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if data[0] == iEOF && len(data) == 5 {
// server_status [2 bytes]
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
if data[0] == iERR {
rows.mc = nil
@@ -736,7 +754,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if !mc.parseTime {
continue
} else {
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp, fieldTypeDateTime,
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
@@ -808,14 +826,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Reserved [8 bit]
// Warning count [16 bit uint]
- if !stmt.mc.strict {
- return columnCount, nil
- }
- // Check for warnings count > 0, only available in MySQL > 4.1
- if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
- return columnCount, stmt.mc.getWarnings()
- }
return columnCount, nil
}
return 0, err
@@ -832,7 +843,7 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
// 2 bytes paramID
const dataOffset = 1 + 4 + 2
- // Can not use the write buffer since
+ // Cannot use the write buffer since
// a) the buffer is too small
// b) it is in use
data := make([]byte, 4+1+4+2+len(arg))
@@ -887,6 +898,12 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
const minPktLen = 4 + 1 + 4 + 1 + 4
mc := stmt.mc
+ // Determine threshould dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
// Reset packet-sequence
mc.sequence = 0
@@ -898,9 +915,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
data = mc.buf.takeCompleteBuffer()
}
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// command [1 byte]
@@ -959,7 +976,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// build NULL-bitmap
if arg == nil {
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
continue
}
@@ -967,7 +984,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// cache types and values
switch v := arg.(type) {
case int64:
- paramTypes[i+i] = fieldTypeLongLong
+ paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -983,7 +1000,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case float64:
- paramTypes[i+i] = fieldTypeDouble
+ paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -999,7 +1016,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case bool:
- paramTypes[i+i] = fieldTypeTiny
+ paramTypes[i+i] = byte(fieldTypeTiny)
paramTypes[i+i+1] = 0x00
if v {
@@ -1011,10 +1028,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case []byte:
// Common case (non-nil value) first
if v != nil {
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1029,14 +1046,14 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// Handle []byte(nil) as a NULL value
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
case string:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1048,23 +1065,25 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case time.Time:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- var val []byte
+ var a [64]byte
+ var b = a[:0]
+
if v.IsZero() {
- val = []byte("0000-00-00")
+ b = append(b, "0000-00-00"...)
} else {
- val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
}
paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(val)),
+ uint64(len(b)),
)
- paramValues = append(paramValues, val...)
+ paramValues = append(paramValues, b...)
default:
- return fmt.Errorf("can not convert type: %T", arg)
+ return fmt.Errorf("cannot convert type: %T", arg)
}
}
@@ -1097,8 +1116,6 @@ func (mc *mysqlConn) discardResults() error {
if err := mc.readUntilEOF(); err != nil {
return err
}
- } else {
- mc.status &^= statusMoreResultsExists
}
}
return nil
@@ -1116,20 +1133,17 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
+ mc := rows.mc
rows.mc = nil
// Error otherwise
- return rows.mc.handleErrorPacket(data)
+ return mc.handleErrorPacket(data)
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
@@ -1145,14 +1159,14 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
// Convert to byte-coded string
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeNULL:
dest[i] = nil
continue
// Numeric Types
case fieldTypeTiny:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(data[pos])
} else {
dest[i] = int64(int8(data[pos]))
@@ -1161,7 +1175,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeShort, fieldTypeYear:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
} else {
dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
@@ -1170,7 +1184,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeInt24, fieldTypeLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
} else {
dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
@@ -1179,7 +1193,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeLongLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
val := binary.LittleEndian.Uint64(data[pos : pos+8])
if val > math.MaxInt64 {
dest[i] = uint64ToString(val)
@@ -1193,7 +1207,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeFloat:
- dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
pos += 4
continue
@@ -1233,10 +1247,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case isNull:
dest[i] = nil
continue
- case rows.columns[i].fieldType == fieldTypeTime:
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
// database/sql does not support an equivalent to TIME, return a string
var dstlen uint8
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 8
case 1, 2, 3, 4, 5, 6:
@@ -1244,7 +1258,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
@@ -1252,10 +1266,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
var dstlen uint8
- if rows.columns[i].fieldType == fieldTypeDate {
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
dstlen = 10
} else {
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 19
case 1, 2, 3, 4, 5, 6:
@@ -1263,7 +1277,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
}
@@ -1279,7 +1293,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// Please report if this happens!
default:
- return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index c08255ee..d3b1e282 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -11,19 +11,20 @@ package mysql
import (
"database/sql/driver"
"io"
+ "math"
+ "reflect"
)
-type mysqlField struct {
- tableName string
- name string
- flags fieldFlag
- fieldType byte
- decimals byte
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
}
type mysqlRows struct {
- mc *mysqlConn
- columns []mysqlField
+ mc *mysqlConn
+ rs resultSet
+ finish func()
}
type binaryRows struct {
@@ -34,37 +35,86 @@ type textRows struct {
mysqlRows
}
-type emptyRows struct{}
-
func (rows *mysqlRows) Columns() []string {
- columns := make([]string, len(rows.columns))
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
- if tableName := rows.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.columns[i].name
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
} else {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
} else {
for i := range columns {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
+
+ rows.rs.columnNames = columns
return columns
}
-func (rows *mysqlRows) Close() error {
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
mc := rows.mc
if mc == nil {
return nil
}
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Remove unread packets from stream
- err := mc.readUntilEOF()
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
if err == nil {
if err = mc.discardResults(); err != nil {
return err
@@ -75,22 +125,66 @@ func (rows *mysqlRows) Close() error {
return err
}
-func (rows *binaryRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
}
+ rows.rs.done = true
+ }
- // Fetch next row from stream
- return rows.readRow(dest)
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
}
- return io.EOF
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
}
-func (rows *textRows) Next(dest []driver.Value) error {
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
@@ -99,14 +193,24 @@ func (rows *textRows) Next(dest []driver.Value) error {
return io.EOF
}
-func (rows emptyRows) Columns() []string {
- return nil
-}
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
-func (rows emptyRows) Close() error {
- return nil
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
}
-func (rows emptyRows) Next(dest []driver.Value) error {
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
return io.EOF
}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 7f9b0458..ce7fe4cd 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -11,6 +11,7 @@ package mysql
import (
"database/sql/driver"
"fmt"
+ "io"
"reflect"
"strconv"
)
@@ -19,11 +20,10 @@ type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
- columns []mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.netConn == nil {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
// driver.Stmt.Close can be called more than once, thus this function
// has to be idempotent.
// See also Issue #450 and golang/go#16019.
@@ -45,14 +45,14 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.netConn == nil {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -62,37 +62,45 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil {
- if resLen > 0 {
- // Columns
- err = mc.readUntilEOF()
- if err != nil {
- return nil, err
- }
-
- // Rows
- err = mc.readUntilEOF()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
}
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
}
}
- return nil, err
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- if stmt.mc.netConn == nil {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -107,14 +115,15 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
if resLen > 0 {
rows.mc = mc
- // Columns
- // If not cached, read them and cache them
- if stmt.columns == nil {
- rows.columns, err = mc.readColumns(resLen)
- stmt.columns = rows.columns
- } else {
- rows.columns = stmt.columns
- err = mc.readUntilEOF()
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
}
}
@@ -123,19 +132,36 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
type converter struct{}
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception. We support uint64 values with their high bit set, which
+// the default implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if !driver.IsValue(sv) {
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ return sv, nil
+ }
+
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
// indirect pointers
if rv.IsNil() {
return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
}
- return c.ConvertValue(rv.Elem().Interface())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
@@ -148,6 +174,38 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
return int64(u64), nil
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ ek := rv.Type().Elem().Kind()
+ if ek == reflect.Uint8 {
+ return rv.Bytes(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
+ case reflect.String:
+ return rv.String(), nil
}
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
index 33c749b3..417d7279 100644
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,7 +13,7 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("COMMIT")
@@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("ROLLBACK")
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index d523b7ff..84d595b6 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -9,23 +9,29 @@
package mysql
import (
- "crypto/sha1"
"crypto/tls"
"database/sql/driver"
"encoding/binary"
"fmt"
"io"
"strings"
+ "sync"
+ "sync/atomic"
"time"
)
+// Registry for custom tls.Configs
var (
- tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
// if err != nil {
@@ -51,19 +57,32 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
return fmt.Errorf("key '%s' is reserved", key)
}
- if tlsConfigRegister == nil {
- tlsConfigRegister = make(map[string]*tls.Config)
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
}
- tlsConfigRegister[key] = config
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
return nil
}
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
- if tlsConfigRegister != nil {
- delete(tlsConfigRegister, key)
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
}
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
}
// Returns the bool value of the input.
@@ -80,119 +99,6 @@ func readBool(input string) (value bool, valid bool) {
return
}
-/******************************************************************************
-* Authentication *
-******************************************************************************/
-
-// Encrypt password using 4.1+ method
-func scramblePassword(scramble, password []byte) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // stage1Hash = SHA1(password)
- crypt := sha1.New()
- crypt.Write(password)
- stage1 := crypt.Sum(nil)
-
- // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
- // inner Hash
- crypt.Reset()
- crypt.Write(stage1)
- hash := crypt.Sum(nil)
-
- // outer Hash
- crypt.Reset()
- crypt.Write(scramble)
- crypt.Write(hash)
- scramble = crypt.Sum(nil)
-
- // token = scrambleHash XOR stage1Hash
- for i := range scramble {
- scramble[i] ^= stage1[i]
- }
- return scramble
-}
-
-// Encrypt password using pre 4.1 (old password) method
-// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
-type myRnd struct {
- seed1, seed2 uint32
-}
-
-const myRndMaxVal = 0x3FFFFFFF
-
-// Pseudo random number generator
-func newMyRnd(seed1, seed2 uint32) *myRnd {
- return &myRnd{
- seed1: seed1 % myRndMaxVal,
- seed2: seed2 % myRndMaxVal,
- }
-}
-
-// Tested to be equivalent to MariaDB's floating point variant
-// http://play.golang.org/p/QHvhd4qved
-// http://play.golang.org/p/RG0q4ElWDx
-func (r *myRnd) NextByte() byte {
- r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
- r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
-
- return byte(uint64(r.seed1) * 31 / myRndMaxVal)
-}
-
-// Generate binary hash from byte string using insecure pre 4.1 method
-func pwHash(password []byte) (result [2]uint32) {
- var add uint32 = 7
- var tmp uint32
-
- result[0] = 1345345333
- result[1] = 0x12345671
-
- for _, c := range password {
- // skip spaces and tabs in password
- if c == ' ' || c == '\t' {
- continue
- }
-
- tmp = uint32(c)
- result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
- result[1] += (result[1] << 8) ^ result[0]
- add += tmp
- }
-
- // Remove sign bit (1<<31)-1)
- result[0] &= 0x7FFFFFFF
- result[1] &= 0x7FFFFFFF
-
- return
-}
-
-// Encrypt password using insecure pre 4.1 method
-func scrambleOldPassword(scramble, password []byte) []byte {
- if len(password) == 0 {
- return nil
- }
-
- scramble = scramble[:8]
-
- hashPw := pwHash(password)
- hashSc := pwHash(scramble)
-
- r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
-
- var out [8]byte
- for i := range out {
- out[i] = r.NextByte() + 64
- }
-
- mask := r.NextByte()
- for i := range out {
- out[i] ^= mask
- }
-
- return out[:]
-}
-
/******************************************************************************
* Time related utils *
******************************************************************************/
@@ -519,7 +425,7 @@ func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
// Check data length
if len(b) >= n {
- return b[n-int(num) : n], false, n, nil
+ return b[n-int(num) : n : n], false, n, nil
}
return nil, false, n, io.EOF
}
@@ -548,8 +454,8 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
if len(b) == 0 {
return 0, true, 1
}
- switch b[0] {
+ switch b[0] {
// 251: NULL
case 0xfb:
return 0, true, 1
@@ -738,3 +644,67 @@ func escapeStringQuotes(buf []byte, v string) []byte {
return buf[:pos]
}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 00000000..f5956345
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 00000000..c35c2a6a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651a..00000000
--- a/vendor/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C testdata
- protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
- make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
index e392575b..3cd3249f 100644
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -35,22 +35,39 @@
package proto
import (
+ "fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
- in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
if in.IsNil() {
- return pb
+ return src
}
-
out := reflect.New(in.Type().Elem())
- // out is empty so a merge is a deep copy.
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
}
// Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
- if emIn, ok := extendable(in.Addr().Interface()); ok {
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index aa207298..d9aa3c42 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -39,8 +39,6 @@ import (
"errors"
"fmt"
"io"
- "os"
- "reflect"
)
// errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
@@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
return
}
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
return string(buf), nil
}
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
- oi := o.index
-
- err := o.skip(t, tag, wire)
- if err != nil {
- return err
- }
-
- if !unrecField.IsValid() {
- return nil
- }
-
- ptr := structPointer_Bytes(base, unrecField)
-
- // Add the skipped field to struct field
- obuf := o.buf
-
- o.buf = *ptr
- o.EncodeVarint(uint64(tag<<3 | wire))
- *ptr = append(o.buf, obuf[oi:o.index]...)
-
- o.buf = obuf
-
- return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
- var u uint64
- var err error
-
- switch wire {
- case WireVarint:
- _, err = o.DecodeVarint()
- case WireFixed64:
- _, err = o.DecodeFixed64()
- case WireBytes:
- _, err = o.DecodeRawBytes(false)
- case WireFixed32:
- _, err = o.DecodeFixed32()
- case WireStartGroup:
- for {
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- fwire := int(u & 0x7)
- if fwire == WireEndGroup {
- break
- }
- ftag := int(u >> 3)
- err = o.skip(t, ftag, fwire)
- if err != nil {
- break
- }
- }
- default:
- err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
- }
- return err
-}
-
// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
+// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
@@ -395,7 +334,13 @@ type Unmarshaler interface {
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
- return UnmarshalMerge(buf, pb)
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
@@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
- // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
@@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
}
// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
- typ, base, err := getbase(pb)
- if err != nil {
- return err
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
}
- return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
}
// Unmarshal parses the protocol buffer representation in the
@@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- err := u.Unmarshal(p.buf[p.index:])
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
-
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
-
- err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
- if collectStats {
- stats.Decode++
- }
-
- return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
- var state errorState
- required, reqFields := prop.reqCount, uint64(0)
-
- var err error
- for err == nil && o.index < len(o.buf) {
- oi := o.index
- var u uint64
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- wire := int(u & 0x7)
- if wire == WireEndGroup {
- if is_group {
- if required > 0 {
- // Not enough information to determine the exact field.
- // (See below.)
- return &RequiredNotSetError{"{Unknown}"}
- }
- return nil // input is satisfied
- }
- return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
- }
- tag := int(u >> 3)
- if tag <= 0 {
- return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
- }
- fieldnum, ok := prop.decoderTags.get(tag)
- if !ok {
- // Maybe it's an extension?
- if prop.extendable {
- if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
- if err = o.skip(st, tag, wire); err == nil {
- extmap := e.extensionsWrite()
- ext := extmap[int32(tag)] // may be missing
- ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- extmap[int32(tag)] = ext
- }
- continue
- }
- }
- // Maybe it's a oneof?
- if prop.oneofUnmarshaler != nil {
- m := structPointer_Interface(base, st).(Message)
- // First return value indicates whether tag is a oneof field.
- ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
- if err == ErrInternalBadWireType {
- // Map the error to something more descriptive.
- // Do the formatting here to save generated code space.
- err = fmt.Errorf("bad wiretype for oneof field in %T", m)
- }
- if ok {
- continue
- }
- }
- err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
- continue
- }
- p := prop.Prop[fieldnum]
-
- if p.dec == nil {
- fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
- continue
- }
- dec := p.dec
- if wire != WireStartGroup && wire != p.WireType {
- if wire == WireBytes && p.packedDec != nil {
- // a packable field
- dec = p.packedDec
- } else {
- err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
- continue
- }
- }
- decErr := dec(o, p, base)
- if decErr != nil && !state.shouldContinue(decErr, p) {
- err = decErr
- }
- if err == nil && p.Required {
- // Successfully decoded a required field.
- if tag <= 64 {
- // use bitmap for fields 1-64 to catch field reuse.
- var mask uint64 = 1 << uint64(tag-1)
- if reqFields&mask == 0 {
- // new required field
- reqFields |= mask
- required--
- }
- } else {
- // This is imprecise. It can be fooled by a required field
- // with a tag > 64 that is encoded twice; that's very rare.
- // A fully correct implementation would require allocating
- // a data structure, which we would like to avoid.
- required--
- }
- }
- }
- if err == nil {
- if is_group {
- return io.ErrUnexpectedEOF
- }
- if state.err != nil {
- return state.err
- }
- if required > 0 {
- // Not enough information to determine the exact field. If we use extra
- // CPU, we could determine the field only if the missing required field
- // has a tag <= 64 and we check reqFields.
- return &RequiredNotSetError{"{Unknown}"}
- }
- }
- return err
-}
-
-// Individual type decoders
-// For each,
-// u is the decoded value,
-// v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
- boolPoolSize = 16
- uint32PoolSize = 8
- uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- if len(o.bools) == 0 {
- o.bools = make([]bool, boolPoolSize)
- }
- o.bools[0] = u != 0
- *structPointer_Bool(base, p.field) = &o.bools[0]
- o.bools = o.bools[1:]
- return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- *structPointer_BoolVal(base, p.field) = u != 0
- return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
- return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
- return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64_Set(structPointer_Word64(base, p.field), o, u)
- return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
- return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_String(base, p.field) = &s
- return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_StringVal(base, p.field) = s
- return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- *structPointer_Bytes(base, p.field) = b
- return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v := structPointer_BoolSlice(base, p.field)
- *v = append(*v, u != 0)
- return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
- v := structPointer_BoolSlice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded bools
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
-
- y := *v
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- y = append(y, u != 0)
- }
-
- *v = y
- return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- structPointer_Word32Slice(base, p.field).Append(uint32(u))
- return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int32s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(uint32(u))
- }
- return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
-
- structPointer_Word64Slice(base, p.field).Append(u)
- return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int64s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(u)
- }
- return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- v := structPointer_StringSlice(base, p.field)
- *v = append(*v, s)
- return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- v := structPointer_BytesSlice(base, p.field)
- *v = append(*v, b)
- return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- oi := o.index // index at the end of this map entry
- o.index -= len(raw) // move buffer back to start of map entry
-
- mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
- if mptr.Elem().IsNil() {
- mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
- }
- v := mptr.Elem() // map[K]V
-
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // See enc_new_map for why.
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
- keybase := toStructPointer(keyptr.Addr()) // **K
-
- var valbase structPointer
- var valptr reflect.Value
- switch p.mtype.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valptr = reflect.ValueOf(&dummy) // *[]byte
- valbase = toStructPointer(valptr) // *[]byte
- case reflect.Ptr:
- // message; valptr is **Msg; need to allocate the intermediate pointer
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valptr.Set(reflect.New(valptr.Type().Elem()))
- valbase = toStructPointer(valptr)
- default:
- // everything else
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valbase = toStructPointer(valptr.Addr()) // **V
- }
-
- // Decode.
- // This parses a restricted wire format, namely the encoding of a message
- // with two fields. See enc_new_map for the format.
- for o.index < oi {
- // tagcode for key and value properties are always a single byte
- // because they have tags 1 and 2.
- tagcode := o.buf[o.index]
- o.index++
- switch tagcode {
- case p.mkeyprop.tagcode[0]:
- if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- case p.mvalprop.tagcode[0]:
- if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
- return err
- }
- default:
- // TODO: Should we silently skip this instead?
- return fmt.Errorf("proto: bad map data tag %d", raw[0])
- }
- }
- keyelem, valelem := keyptr.Elem(), valptr.Elem()
- if !keyelem.IsValid() {
- keyelem = reflect.Zero(p.mtype.Key())
- }
- if !valelem.IsValid() {
- valelem = reflect.Zero(p.mtype.Elem())
- }
-
- v.SetMapIndex(keyelem, valelem)
- return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
- return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
- raw, e := o.DecodeRawBytes(false)
- if e != nil {
- return e
- }
-
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := structPointer_Interface(bas, p.stype)
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, false, bas)
- o.buf = obuf
- o.index = oi
-
- return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
- v := reflect.New(p.stype)
- bas := toStructPointer(v)
- structPointer_StructPointerSlice(base, p.field).Append(bas)
-
- if is_group {
- err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
- return err
- }
-
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
return err
}
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := v.Interface()
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
- o.buf = obuf
- o.index = oi
-
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
return err
}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..dea2617c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index 8b84d1b2..c27d35f8 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -39,7 +39,6 @@ import (
"errors"
"fmt"
"reflect"
- "sort"
)
// RequiredNotSetError is the error returned if Marshal is called with
@@ -82,10 +81,6 @@ var (
const maxVarintBytes = 10 // maximum length of a varint
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
@@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error {
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
- return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
}
// EncodeFixed64 writes a 64-bit integer to the Buffer.
@@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error {
return nil
}
-func sizeFixed64(x uint64) int {
- return 8
-}
-
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
@@ -165,20 +165,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error {
return nil
}
-func sizeFixed32(x uint64) int {
- return 4
-}
-
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
- return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
-}
-
-func sizeZigzag64(x uint64) int {
- return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
@@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error {
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
-func sizeZigzag32(x uint64) int {
- return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error {
return nil
}
-func sizeRawBytes(b []byte) int {
- return sizeVarint(uint64(len(b))) +
- len(b)
-}
-
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
@@ -215,319 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error {
return nil
}
-func sizeStringBytes(s string) int {
- return sizeVarint(uint64(len(s))) +
- len(s)
-}
-
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- return m.Marshal()
- }
- p := NewBuffer(nil)
- err := p.Marshal(pb)
- if p.buf == nil && err == nil {
- // Return a non-nil slice on success.
- return []byte{}, nil
- }
- return p.buf, err
-}
-
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- var state errorState
- err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
- }
- return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- data, err := m.Marshal()
- p.buf = append(p.buf, data...)
- return err
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- err = p.enc_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Encode++ // Parens are to work around a goimports bug.
- }
-
- if len(p.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
- // Can the object marshal itself? If so, Size is slow.
- // TODO: add Size to Marshaler, or add a Sizer interface.
- if m, ok := pb.(Marshaler); ok {
- b, _ := m.Marshal()
- return len(b)
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return 0
- }
- if err == nil {
- n = size_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Size++ // Parens are to work around a goimports bug.
- }
-
- return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := 0
- if *v {
- x = 1
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
- v := *structPointer_BoolVal(base, p.field)
- if !v {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, 1)
- return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
- v := *structPointer_BoolVal(base, p.field)
- if !v && !p.oneof {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := word32_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := word32_Get(v)
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return ErrNil
- }
- x := word64_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return 0
- }
- x := word64_Get(v)
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := *v
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
- v := *structPointer_StringVal(base, p.field)
- if v == "" {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(v)
- return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return 0
- }
- x := *v
- n += len(p.tagcode)
- n += sizeStringBytes(x)
- return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_StringVal(base, p.field)
- if v == "" && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeStringBytes(v)
- return
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
}
// All protocol buffer fields are nillable, but be careful.
@@ -538,825 +219,3 @@ func isNil(v reflect.Value) bool {
}
return false
}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
- var state errorState
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return ErrNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- return state.err
- }
-
- o.buf = append(o.buf, p.tagcode...)
- return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return 0
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n0 := len(p.tagcode)
- n1 := sizeRawBytes(data)
- return n0 + n1
- }
-
- n0 := len(p.tagcode)
- n1 := size_struct(p.sprop, structp)
- n2 := sizeVarint(uint64(n1)) // size of encoded length
- return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
- var state errorState
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return ErrNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
- err := o.enc_struct(p.sprop, b)
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return 0
- }
-
- n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
- n += size_struct(p.sprop, b)
- n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- for _, x := range s {
- o.buf = append(o.buf, p.tagcode...)
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
- for _, x := range s {
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- n += len(p.tagcode)
- n += sizeVarint(uint64(l))
- n += l // each bool takes exactly one byte
- return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if s == nil {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if s == nil && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(buf, uint64(x))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- bufSize += p.valSize(uint64(x))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := s.Index(i)
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := s.Index(i)
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, uint64(s.Index(i)))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(uint64(s.Index(i)))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, s.Index(i))
- }
- return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- n += p.valSize(s.Index(i))
- }
- return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, s.Index(i))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(s.Index(i))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return 0
- }
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeRawBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeStringBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return errRepeatedHasNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- continue
- }
-
- o.buf = append(o.buf, p.tagcode...)
- err := o.enc_len_struct(p.sprop, structp, &state)
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
- }
- return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return // return the size up to this point
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- exts := structPointer_ExtMap(base, p.field)
- if err := encodeExtensionsMap(*exts); err != nil {
- return err
- }
-
- return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
- exts := structPointer_Extensions(base, p.field)
-
- v, mu := exts.extensionsRead()
- if v == nil {
- return nil
- }
-
- mu.Lock()
- defer mu.Unlock()
- if err := encodeExtensionsMap(v); err != nil {
- return err
- }
-
- return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := structPointer_ExtMap(base, p.field)
- return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
- v := structPointer_Extensions(base, p.field)
- return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
- return err
- }
- return nil
- }
-
- // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
- if err := o.enc_len_thing(enc, &state); err != nil {
- return err
- }
- }
- return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- n := 0
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
- keycopy.Set(key)
- valcopy.Set(val)
-
- // Tag codes for key and val are the responsibility of the sub-sizer.
- keysize := p.mkeyprop.size(p.mkeyprop, keybase)
- valsize := p.mvalprop.size(p.mvalprop, valbase)
- entry := keysize + valsize
- // Add on tag code and length of map entry itself.
- n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
- }
- return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
- keycopy = reflect.New(mapType.Key()).Elem() // addressable K
- keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
- keyptr.Set(keycopy.Addr()) //
- keybase = toStructPointer(keyptr.Addr()) // **K
-
- // Value types are more varied and require special handling.
- switch mapType.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
- valbase = toStructPointer(valcopy.Addr())
- case reflect.Ptr:
- // message; the generated field type is map[K]*Msg (so V is *Msg),
- // so we only need one level of indirection.
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valbase = toStructPointer(valcopy.Addr())
- default:
- // everything else
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
- valptr.Set(valcopy.Addr()) //
- valbase = toStructPointer(valptr.Addr()) // **V
- }
- return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
- var state errorState
- // Encode fields in tag order so that decoders may use optimizations
- // that depend on the ordering.
- // https://developers.google.com/protocol-buffers/docs/encoding#order
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.enc != nil {
- err := p.enc(o, p, base)
- if err != nil {
- if err == ErrNil {
- if p.Required && state.err == nil {
- state.err = &RequiredNotSetError{p.Name}
- }
- } else if err == errRepeatedHasNil {
- // Give more context to nil values in repeated fields.
- return errors.New("repeated field " + p.OrigName + " has nil element")
- } else if !state.shouldContinue(err, p) {
- return err
- }
- }
- if len(o.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- }
- }
-
- // Do oneof fields.
- if prop.oneofMarshaler != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- if err := prop.oneofMarshaler(m, o); err == ErrNil {
- return errOneofHasNil
- } else if err != nil {
- return err
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- if len(o.buf)+len(v) > maxMarshalSize {
- return ErrTooLarge
- }
- if len(v) > 0 {
- o.buf = append(o.buf, v...)
- }
- }
-
- return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.size != nil {
- n += p.size(p, base)
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- n += len(v)
- }
-
- // Factor in any oneof fields.
- if prop.oneofSizer != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- n += prop.oneofSizer(m)
- }
-
- return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
- return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
- iLen := len(o.buf)
- o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
- iMsg := len(o.buf)
- err := enc()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- lMsg := len(o.buf) - iMsg
- lLen := sizeVarint(uint64(lMsg))
- switch x := lLen - (iMsg - iLen); {
- case x > 0: // actual length is x bytes larger than the space we reserved
- // Move msg x bytes right.
- o.buf = append(o.buf, zeroes[:x]...)
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- case x < 0: // actual length is x bytes smaller than the space we reserved
- // Move msg x bytes left.
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- o.buf = o.buf[:len(o.buf)+x] // x is negative
- }
- // Encode the length in the reserved space.
- o.buf = o.buf[:iLen]
- o.EncodeVarint(uint64(lMsg))
- o.buf = o.buf[:len(o.buf)+lMsg]
- return state.err
-}
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
- err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
- // Ignore unset required fields.
- reqNotSet, ok := err.(*RequiredNotSetError)
- if !ok {
- return false
- }
- if s.err == nil {
- if prop != nil {
- err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
- }
- s.err = err
- }
- return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
index 2ed1cf59..d4db5a1c 100644
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
// set/unset mismatch
return false
}
- b1, ok := f1.Interface().(raw)
- if ok {
- b2 := f2.Interface().(raw)
- // RawMessage
- if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
- return false
- }
- continue
- }
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
@@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- if !bytes.Equal(u1, u2) {
- return false
- }
-
- return true
+ return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
@@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
m1, m2 := e1.value, e2.value
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
desc = m[extNum]
}
if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- continue
+ return false
}
var err error
if m1 == nil {
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index eaad2183..816a3b9d 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -38,6 +38,7 @@ package proto
import (
"errors"
"fmt"
+ "io"
"reflect"
"strconv"
"sync"
@@ -91,14 +92,29 @@ func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, bool) {
- if ep, ok := p.(extendableProto); ok {
- return ep, ok
- }
- if ep, ok := p.(extendableProtoV1); ok {
- return extensionAdapter{ep}, ok
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
}
- return nil, false
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
@@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc
return e.p.extensionMap, &e.p.mu
}
-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
-
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
@@ -179,8 +192,8 @@ type Extension struct {
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
- epb, ok := extendable(base)
- if !ok {
+ epb, err := extendable(base)
+ if err != nil {
return
}
extmap := epb.extensionsWrite()
@@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
@@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop
}
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensions(e *XXX_InternalExtensions) error {
- m, mu := e.extensionsRead()
- if m == nil {
- return nil // fast path
- }
- mu.Lock()
- defer mu.Unlock()
- return encodeExtensionsMap(m)
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensionsMap(m map[int32]Extension) error {
- for k, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- p := NewBuffer(nil)
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- if err := props.enc(p, props, toStructPointer(x)); err != nil {
- return err
- }
- e.enc = p.buf
- m[k] = e
- }
- return nil
-}
-
-func extensionsSize(e *XXX_InternalExtensions) (n int) {
- m, mu := e.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
- defer mu.Unlock()
- return extensionsMapSize(m)
-}
-
-func extensionsMapSize(m map[int32]Extension) (n int) {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- n += props.size(props, toStructPointer(x))
- }
- return
-}
-
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
@@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false
}
mu.Lock()
- _, ok = extmap[extension.Field]
+ _, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
@@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
delete(extmap, extension.Field)
}
-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
}
emap, mu := epb.extensionsRead()
@@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
return e.value, nil
}
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
@@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
@@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- o := NewBuffer(b)
-
t := reflect.TypeOf(extension.ExtensionType)
-
- props := extensionProperties(extension)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate a "field" to store the pointer/slice itself; the
- // pointer/slice will be stored here. We pass
- // the address of this field to props.dec.
- // This passes a zero field and a *t and lets props.dec
- // interpret it as a *struct{ x t }.
+ // Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
+ var err error
for {
- // Discard wire type and field number varint. It isn't needed.
- if _, err := o.DecodeVarint(); err != nil {
- return nil, err
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
}
+ b = b[n:]
+ wire := int(x) & 7
- if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
return nil, err
}
- if o.index >= len(o.buf) {
+ if len(b) == 0 {
break
}
}
@@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
@@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
@@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, ok := extendable(pb)
- if !ok {
- return errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
@@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
m := epb.extensionsWrite()
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 1c225504..0e2191b8 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -265,6 +265,7 @@ package proto
import (
"encoding/json"
+ "errors"
"fmt"
"log"
"reflect"
@@ -273,6 +274,8 @@ import (
"sync"
)
+var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
@@ -309,16 +312,7 @@ type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
- // pools of basic types to amortize allocation.
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
+ deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
@@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
+
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
@@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil
}
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
+ s := mapKeySorter{vs: vs}
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
@@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
@@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index fd982dec..3b6ca41d 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -42,6 +42,7 @@ import (
"fmt"
"reflect"
"sort"
+ "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
}
func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
+ return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
+ return marshalMessageSet(exts, false)
+}
+
+// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) {
case *XXX_InternalExtensions:
- if err := encodeExtensions(exts); err != nil {
- return nil, err
- }
- m, _ = exts.extensionsRead()
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
case map[int32]Extension:
- if err := encodeExtensionsMap(exts); err != nil {
- return nil, err
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
}
- m = exts
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
default:
return nil, errors.New("proto: not an extension map")
}
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
@@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
- m, _ = exts.extensionsRead()
+ var mu sync.Locker
+ m, mu = exts.extensionsRead()
+ if m != nil {
+ // Keep the extensions map locked until we're done marshaling to prevent
+ // races between marshaling and unmarshaling the lazily-{en,de}coded
+ // values.
+ mu.Lock()
+ defer mu.Unlock()
+ }
case map[int32]Extension:
m = exts
default:
@@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
for i, id := range ids {
ext := m[id]
- if i > 0 {
- b.WriteByte(',')
- }
-
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
+
+ if i > 0 && b.Len() > 1 {
+ b.WriteByte(',')
+ }
+
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index fb512e2e..b6cad908 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build appengine js
+// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -38,32 +38,13 @@
package proto
import (
- "math"
"reflect"
+ "sync"
)
-// A structPointer is a pointer to a struct.
-type structPointer struct {
- v reflect.Value
-}
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p.v.IsNil()
-}
+const unsafeAllowed = false
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
- return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
@@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
var invalidField = field(nil)
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
- // Special case: an extension map entry with a value of type T
- // passes a *T to the struct-handling code with a zero field,
- // expecting that it will be treated as equivalent to *struct{ X T },
- // which has the same memory layout. We have to handle that case
- // specially, because reflect will panic if we call FieldByIndex on a
- // non-struct.
- if f == nil {
- return p.v.Elem()
- }
-
- return p.v.Elem().FieldByIndex(f)
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
}
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
- return structPointer_field(p, f).Addr().Interface()
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return structPointer_ifield(p, f).(*[]byte)
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return structPointer_ifield(p, f).(*[][]byte)
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
}
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return structPointer_ifield(p, f).(**bool)
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return structPointer_ifield(p, f).(*bool)
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
}
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return structPointer_ifield(p, f).(*[]bool)
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
}
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return structPointer_ifield(p, f).(**string)
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return structPointer_ifield(p, f).(*string)
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return structPointer_ifield(p, f).(*[]string)
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
}
-// Extensions returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return structPointer_ifield(p, f).(*XXX_InternalExtensions)
-}
+var int32ptr = reflect.TypeOf((*int32)(nil))
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return structPointer_ifield(p, f).(*map[int32]Extension)
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
}
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return structPointer_field(p, f).Addr()
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- structPointer_field(p, f).Set(q.v)
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return structPointer{structPointer_field(p, f)}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
}
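
The element-by-element copy is needed because Go does not allow converting a slice of a defined enum type to []int32, even though each element converts. A tiny illustration, where Color is a made-up stand-in for a generated enum:

package main

import "fmt"

// Color stands in for a generated protobuf enum type.
type Color int32

func main() {
	enums := []Color{1, 2, 3}
	// ints := []int32(enums) // does not compile: []Color cannot be converted to []int32
	ints := make([]int32, len(enums))
	for i, e := range enums {
		ints[i] = int32(e) // individual elements convert fine
	}
	fmt.Println(ints) // [1 2 3]
}
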
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
- return structPointerSlice{structPointer_field(p, f)}
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
}
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct {
- v reflect.Value
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
}
-func (p structPointerSlice) Len() int { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer) {
- p.v.Set(reflect.Append(p.v, q.v))
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
}
-
-var (
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- float32Type = reflect.TypeOf(float32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct {
- v reflect.Value
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
}
-
-// IsNil reports whether p is nil.
-func word32_IsNil(p word32) bool {
- return p.v.IsNil()
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
}
-
-// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- t := p.v.Type().Elem()
- switch t {
- case int32Type:
- if len(o.int32s) == 0 {
- o.int32s = make([]int32, uint32PoolSize)
- }
- o.int32s[0] = int32(x)
- p.v.Set(reflect.ValueOf(&o.int32s[0]))
- o.int32s = o.int32s[1:]
- return
- case uint32Type:
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint32s[0]))
- o.uint32s = o.uint32s[1:]
- return
- case float32Type:
- if len(o.float32s) == 0 {
- o.float32s = make([]float32, uint32PoolSize)
- }
- o.float32s[0] = math.Float32frombits(x)
- p.v.Set(reflect.ValueOf(&o.float32s[0]))
- o.float32s = o.float32s[1:]
- return
- }
-
- // must be enum
- p.v.Set(reflect.New(t))
- p.v.Elem().SetInt(int64(int32(x)))
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32_Get(p word32) uint32 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
}
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32{structPointer_field(p, f)}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
}
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct {
- v reflect.Value
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
}
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- switch p.v.Type() {
- case int32Type:
- p.v.SetInt(int64(x))
- return
- case uint32Type:
- p.v.SetUint(uint64(x))
- return
- case float32Type:
- p.v.SetFloat(float64(math.Float32frombits(x)))
- return
- }
-
- // must be enum
- p.v.SetInt(int64(int32(x)))
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32Val_Get(p word32Val) uint32 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
}
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val{structPointer_field(p, f)}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
}
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct {
- v reflect.Value
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
}
-
-func (p word32Slice) Append(x uint32) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int32:
- elem.SetInt(int64(int32(x)))
- case reflect.Uint32:
- elem.SetUint(uint64(x))
- case reflect.Float32:
- elem.SetFloat(float64(math.Float32frombits(x)))
- }
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
}
-
-func (p word32Slice) Len() int {
- return p.v.Len()
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
}
-
-func (p word32Slice) Index(i int) uint32 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
}
-
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice {
- return word32Slice{structPointer_field(p, f)}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
}
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct {
- v reflect.Value
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
}
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- t := p.v.Type().Elem()
- switch t {
- case int64Type:
- if len(o.int64s) == 0 {
- o.int64s = make([]int64, uint64PoolSize)
- }
- o.int64s[0] = int64(x)
- p.v.Set(reflect.ValueOf(&o.int64s[0]))
- o.int64s = o.int64s[1:]
- return
- case uint64Type:
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint64s[0]))
- o.uint64s = o.uint64s[1:]
- return
- case float64Type:
- if len(o.float64s) == 0 {
- o.float64s = make([]float64, uint64PoolSize)
- }
- o.float64s[0] = math.Float64frombits(x)
- p.v.Set(reflect.ValueOf(&o.float64s[0]))
- o.float64s = o.float64s[1:]
- return
- }
- panic("unreachable")
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
}
-
-func word64_IsNil(p word64) bool {
- return p.v.IsNil()
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
}
-
-func word64_Get(p word64) uint64 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64{structPointer_field(p, f)}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct {
- v reflect.Value
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
}
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- switch p.v.Type() {
- case int64Type:
- p.v.SetInt(int64(x))
- return
- case uint64Type:
- p.v.SetUint(x)
- return
- case float64Type:
- p.v.SetFloat(math.Float64frombits(x))
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
- panic("unreachable")
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
}
-func word64Val_Get(p word64Val) uint64 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
}
- panic("unreachable")
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val{structPointer_field(p, f)}
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
}
-type word64Slice struct {
- v reflect.Value
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
}
-
-func (p word64Slice) Append(x uint64) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int64:
- elem.SetInt(int64(int64(x)))
- case reflect.Uint64:
- elem.SetUint(uint64(x))
- case reflect.Float64:
- elem.SetFloat(float64(math.Float64frombits(x)))
- }
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
-
-func (p word64Slice) Len() int {
- return p.v.Len()
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
}
-
-func (p word64Slice) Index(i int) uint64 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return uint64(elem.Uint())
- case reflect.Float64:
- return math.Float64bits(float64(elem.Float()))
- }
- panic("unreachable")
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
-
-func structPointer_Word64Slice(p structPointer, f field) word64Slice {
- return word64Slice{structPointer_field(p, f)}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
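
The mutex-based wrappers above are the fallback used when the build selects this reflect-only file; the selection comes from the build tags near the top. A minimal sketch of that two-file pattern under assumed file and package names (not runnable as a single snippet, since build constraints live at the top of separate files):

// file ptrdemo_reflect.go (selected when the purego, appengine, or js tag is set)
// +build purego appengine js

package ptrdemo

const unsafeAllowed = false

// file ptrdemo_unsafe.go (selected otherwise)
// +build !purego,!appengine,!js

package ptrdemo

const unsafeAllowed = true

Running something like "go test -tags purego github.com/golang/protobuf/proto" should exercise the reflect-based path instead of the unsafe one.
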
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index 6b5567d4..d55a335d 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build !appengine,!js
+// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@@ -37,38 +37,13 @@ package proto
import (
"reflect"
+ "sync/atomic"
"unsafe"
)
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
- return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
+const unsafeAllowed = true
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
@@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
- return f != ^field(0)
+ return f != invalidField
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
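
The two-word interface-header trick used by toPointer and toAddrPointer can be shown in isolation. A minimal sketch that relies on the gc runtime's interface layout, so it is illustrative rather than portable:

package main

import (
	"fmt"
	"unsafe"
)

// dataWord reads the second word of an interface header: an interface value is
// a (type pointer, data pointer) pair, and for a pointer dynamic type the data
// word is the pointer itself.
func dataWord(i interface{}) unsafe.Pointer {
	return (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]
}

func main() {
	x := 42
	var i interface{} = &x
	p := (*int)(dataWord(i))
	fmt.Println(*p == x) // true
}
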
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
}
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid, however calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) isNil() bool {
+ return p.p == nil
}
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
}
-
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
}
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
}
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
- return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
}
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *v is nil.
-func word32_IsNil(p word32) bool {
- return *p == nil
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
}
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- *p = &o.uint32s[0]
- o.uint32s = o.uint32s[1:]
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
}
-// Get gets the value pointed at by *v.
-func word32_Get(p word32) uint32 {
- return **p
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- *p = x
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
- return *p
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
-func (v *word32Slice) Len() int { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
- return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- *p = &o.uint64s[0]
- o.uint64s = o.uint64s[1:]
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
}
-
-func word64_IsNil(p word64) bool {
- return *p == nil
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
}
-
-func word64_Get(p word64) uint64 {
- return **p
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
}
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- *p = x
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
}
-func word64Val_Get(p word64Val) uint64 {
- return *p
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
}
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
-func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
-func (v *word64Slice) Len() int { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
- return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index ec2289c0..f710adab 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -58,42 +58,6 @@ const (
WireFixed32 = 5
)
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@@ -140,13 +104,6 @@ type StructProperties struct {
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
- unrecField field // field id of the XXX_unrecognized []byte field
- extendable bool // is this an extendable proto
-
- oneofMarshaler oneofMarshaler
- oneofUnmarshaler oneofUnmarshaler
- oneofSizer oneofSizer
- stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
@@ -187,36 +144,19 @@ type Properties struct {
Default string // default value
HasDefault bool // whether an explicit default was provided
- def_uint64 uint64
-
- enc encoder
- valEnc valueEncoder // set for bool and numeric types only
- field field
- tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
- tagbuf [8]byte
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
- isMarshaler bool
- isUnmarshaler bool
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
-
- size sizer
- valSize valueSizer // set for bool and numeric types only
-
- dec decoder
- valDec valueDecoder // set for bool and numeric types only
-
- // If this is a packable field, this will be the decoder for the packed version of the field.
- packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s = ","
+ s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
@@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
switch p.Wire {
case "varint":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeVarint
- p.valDec = (*Buffer).DecodeVarint
- p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
- p.valEnc = (*Buffer).EncodeFixed32
- p.valDec = (*Buffer).DecodeFixed32
- p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
- p.valEnc = (*Buffer).EncodeFixed64
- p.valDec = (*Buffer).DecodeFixed64
- p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag32
- p.valDec = (*Buffer).DecodeZigzag32
- p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag64
- p.valDec = (*Buffer).DecodeZigzag64
- p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
@@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) {
return
}
+outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
@@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) {
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
- break
+ break outer
}
}
}
}
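
The switch to a labeled break matters because a bare break inside a switch only exits the switch, not the surrounding loop. A small illustration of the difference, unrelated to the real tag syntax:

package main

import "fmt"

func main() {
	fields := []string{"opt", "def=hello", "ignored"}
	seen := 0
outer:
	for _, f := range fields {
		seen++
		switch {
		case f == "opt":
			// keep scanning
		case len(f) >= 4 && f[:4] == "def=":
			// A bare "break" here would only exit the switch and the loop
			// would keep going; the labeled break leaves the loop, which is
			// what Parse intends once the default value has been consumed.
			break outer
		}
	}
	fmt.Println(seen) // 2, not 3
}
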
-func logNoSliceEnc(t1, t2 reflect.Type) {
- fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- p.enc = nil
- p.dec = nil
- p.size = nil
-
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
- // proto3 scalar types
-
- case reflect.Bool:
- p.enc = (*Buffer).enc_proto3_bool
- p.dec = (*Buffer).dec_proto3_bool
- p.size = size_proto3_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_proto3_int32
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_proto3_uint32
- p.dec = (*Buffer).dec_proto3_int32 // can reuse
- p.size = size_proto3_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_proto3_int64
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.String:
- p.enc = (*Buffer).enc_proto3_string
- p.dec = (*Buffer).dec_proto3_string
- p.size = size_proto3_string
-
case reflect.Ptr:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
- break
- case reflect.Bool:
- p.enc = (*Buffer).enc_bool
- p.dec = (*Buffer).dec_bool
- p.size = size_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_int32
- p.dec = (*Buffer).dec_int32
- p.size = size_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_uint32
- p.dec = (*Buffer).dec_int32 // can reuse
- p.size = size_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_int64
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_int32
- p.size = size_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.String:
- p.enc = (*Buffer).enc_string
- p.dec = (*Buffer).dec_string
- p.size = size_string
- case reflect.Struct:
+ if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
- p.isMarshaler = isMarshaler(t1)
- p.isUnmarshaler = isUnmarshaler(t1)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_struct_message
- p.dec = (*Buffer).dec_struct_message
- p.size = size_struct_message
- } else {
- p.enc = (*Buffer).enc_struct_group
- p.dec = (*Buffer).dec_struct_group
- p.size = size_struct_group
- }
}
case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- logNoSliceEnc(t1, t2)
- break
- case reflect.Bool:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_bool
- p.size = size_slice_packed_bool
- } else {
- p.enc = (*Buffer).enc_slice_bool
- p.size = size_slice_bool
- }
- p.dec = (*Buffer).dec_slice_bool
- p.packedDec = (*Buffer).dec_slice_packed_bool
- case reflect.Int32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int32
- p.size = size_slice_packed_int32
- } else {
- p.enc = (*Buffer).enc_slice_int32
- p.size = size_slice_int32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Uint32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Int64, reflect.Uint64:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- case reflect.Uint8:
- p.dec = (*Buffer).dec_slice_byte
- if p.proto3 {
- p.enc = (*Buffer).enc_proto3_slice_byte
- p.size = size_proto3_slice_byte
- } else {
- p.enc = (*Buffer).enc_slice_byte
- p.size = size_slice_byte
- }
- case reflect.Float32, reflect.Float64:
- switch t2.Bits() {
- case 32:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case 64:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- default:
- logNoSliceEnc(t1, t2)
- break
- }
- case reflect.String:
- p.enc = (*Buffer).enc_slice_string
- p.dec = (*Buffer).dec_slice_string
- p.size = size_slice_string
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
- break
- case reflect.Struct:
- p.stype = t2.Elem()
- p.isMarshaler = isMarshaler(t2)
- p.isUnmarshaler = isUnmarshaler(t2)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_slice_struct_message
- p.dec = (*Buffer).dec_slice_struct_message
- p.size = size_slice_struct_message
- } else {
- p.enc = (*Buffer).enc_slice_struct_group
- p.dec = (*Buffer).dec_slice_struct_group
- p.size = size_slice_struct_group
- }
- }
- case reflect.Slice:
- switch t2.Elem().Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
- break
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_slice_byte
- p.dec = (*Buffer).dec_slice_slice_byte
- p.size = size_slice_slice_byte
- }
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
}
case reflect.Map:
- p.enc = (*Buffer).enc_new_map
- p.dec = (*Buffer).dec_new_map
- p.size = size_new_map
-
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
@@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
- // precalculate tag code
- wire := p.WireType
- if p.Packed {
- wire = WireBytes
- }
- x := uint32(p.Tag)<<3 | uint32(wire)
- i := 0
- for i = 0; x > 127; i++ {
- p.tagbuf[i] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- p.tagbuf[i] = uint8(x)
- p.tagcode = p.tagbuf[0 : i+1]
-
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
@@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
}
var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
- unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isMarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isMarshaler")
- }
- return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isUnmarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isUnmarshaler")
- }
- return t.Implements(unmarshalerType)
-}
-
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
@@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
- if f != nil {
- p.field = toField(f)
- }
if tag == "" {
return
}
p.Parse(tag)
- p.setEncAndDec(typ, f, lockGetProp)
+ p.setFieldProps(typ, f, lockGetProp)
}
var (
@@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
propertiesMap[t] = prop
// build properties
- prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
- reflect.PtrTo(t).Implements(extendableProtoV1Type)
- prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
@@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
- if f.Name == "XXX_InternalExtensions" { // special case
- p.enc = (*Buffer).enc_exts
- p.dec = nil // not needed
- p.size = size_exts
- } else if f.Name == "XXX_extensions" { // special case
- p.enc = (*Buffer).enc_map
- p.dec = nil // not needed
- p.size = size_map
- } else if f.Name == "XXX_unrecognized" { // special case
- prop.unrecField = toField(&f)
- }
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
@@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
- fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
- }
}
// Re-order prop.order.
@@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
- prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
- prop.stype = t
+ _, _, _, oots = om.XXX_OneofFuncs()
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
@@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
return prop
}
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
- if len(x) != 1 {
- fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
- return nil
- }
- prop := GetProperties(t)
- return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
- if pb == nil {
- err = ErrNil
- return
- }
- // get the reflect type of the pointer to the struct.
- t = reflect.TypeOf(pb)
- // get the address of the struct.
- value := reflect.ValueOf(pb)
- b = toStructPointer(value)
- return
-}
-
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
@@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 {
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
- protoTypes = make(map[string]reflect.Type)
- revProtoTypes = make(map[reflect.Type]string)
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
- if _, ok := protoTypes[name]; ok {
+ if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
- protoTypes[name] = t
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
revProtoTypes[t] = name
}
@@ -855,7 +520,14 @@ func MessageName(x Message) string {
}
// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
// A registry of all linked proto files.
var (
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000..0f212b30
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2681 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// fall back to compute the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errreq error
+ // The old marshaler encodes extensions at beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required && errreq == nil {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ errreq = &RequiredNotSetError{f.name}
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errreq
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // tag has format of "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // Oneof bytes field may also have "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != nil && err != ErrNil { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 00000000..5525def6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 00000000..55f0340a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,1967 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist
+ extensionRanges []ExtensionRange // extension ranges, or nil if no extensions
+ isMessageSet bool // if true, implies extensions field is [][]byte, not extensionsMap
+}
+
+// An unmarshaler takes a byte stream, the pointer to a field, and a wire type.
+// It decodes the field, stores it at f, and returns the unused bytes.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// A field to be unmarshaled.
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to the protobuf message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ rnse = r
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if rnse != nil {
+ // A required field of a submessage/group is missing. Return that error.
+ return rnse
+ }
+ if reqMask != u.reqMask {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ return &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return nil
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask)
+ }
+
+ // Find any types associated with oneof fields.
+ // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+ if fn.IsValid() {
+ res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+ for i := res.Len() - 1; i >= 0; i-- {
+ v := res.Index(i) // interface{}
+ tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+ tag, err := strconv.Atoi(tagstr)
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagstr)
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(tag, of.field, unmarshal, 0)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0)
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
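// Editorial note, not part of the vendored file: as the comment above says, a
// bool may arrive as a non-minimal varint. decodeVarint on {0x81, 0x00}
// returns (1, 2), so v still becomes true even though a minimal encoder would
// emit just {0x01}.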
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
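// Editorial sketch, not part of the vendored file, of the append trick above:
// appending zero bytes to a nil slice yields nil, while appending to
// emptyBuf[:] yields an empty but non-nil slice, so a bytes field that was
// present on the wire ends up non-nil even when it is empty.
//
//	var empty [0]byte
//	a := append([]byte(nil), []byte{}...) // a == nil
//	c := append(empty[:], []byte{}...)    // c != nil, len(c) == 0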
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if err == nil {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nil
+ }
+}
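// Editorial sketch, not part of the vendored file: each map entry is a tiny
// two-field message with the key as field 1 and the value as field 2. For a
// hypothetical map<string, int32> entry {"ab": 7}, the bytes handed to the
// function above would be
//
//	b := []byte{0x06, 0x0A, 0x02, 0x61, 0x62, 0x10, 0x07}
//	// 0x06: entry length; 0x0A 0x02 'a' 'b': key (field 1) = "ab";
//	// 0x10 0x07: value (field 2) = 7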
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nil
+ }
+}
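// Editorial sketch, not part of the vendored file: for the Msg example in the
// comment above, the generated wrapper types look roughly like
//
//	type Msg struct{ F isMsg_F }
//	type isMsg_F interface{ isMsg_F() }
//	type Msg_X struct{ X int64 }
//
//	func (*Msg_X) isMsg_F() {}
//
// (the exact generated code differs). The closure allocates a *Msg_X,
// unmarshals the varint into its single field, and stores that pointer into
// Msg.F through the isMsg_F interface.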
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
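// Editorial sketch, not part of the vendored file: groups are delimited by
// StartGroup/EndGroup tags instead of a length prefix. For group number 1
// holding one varint field 2 = 3, the bytes after the StartGroup tag (0x0B)
// would be
//
//	b := []byte{0x10, 0x03, 0x0C} // field 2 = 3, then the EndGroup tag 0x0C
//
// and findEndGroup(b) returns (2, 3): the EndGroup tag starts at index 2 and
// the remaining data begins at index 3.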
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) <= 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
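// Editorial sketch, not part of the vendored file: a round trip through the
// two helpers above with 300 (binary 1_0010_1100, two 7-bit groups):
//
//	buf := encodeVarint(nil, 300) // []byte{0xAC, 0x02}
//	x, n := decodeVarint(buf)     // x == 300, n == 2
//
// decodeVarint is unrolled one byte at a time rather than looping, presumably
// so the common one- and two-byte cases return as quickly as possible.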
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
index 965876bf..2205fdaa 100644
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -50,7 +50,6 @@ import (
var (
newline = []byte("\n")
spaces = []byte(" ")
- gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
@@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
return nil
}
-// raw is the interface satisfied by RawMessage.
-type raw interface {
- Bytes() []byte
-}
-
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
@@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
props := sprops.Prop[i]
name := st.Field(i).Name
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
@@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if b, ok := fv.Interface().(raw); ok {
- if err := writeRaw(w, b.Bytes()); err != nil {
- return err
- }
- continue
- }
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
@@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field).
pv := sv.Addr()
- if _, ok := extendable(pv.Interface()); ok {
+ if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
@@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if err := writeUnknownStruct(w, b); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- return nil
-}
-
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
@@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if _, err = w.Write(text); err != nil {
return err
}
- } else if err := tm.writeStruct(w, v); err != nil {
- return err
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 5e14513f..0685bae3 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -206,7 +206,6 @@ func (p *textParser) advance() {
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
- errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
@@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
- base := 8
- ss := s[:2]
+ ss := string(r) + s[:2]
s = s[2:]
- if r == 'x' || r == 'X' {
- base = 16
- } else {
- ss = string(r) + ss
- }
- i, err := strconv.ParseUint(ss, base, 8)
+ i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
- return "", "", err
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
- case 'u', 'U':
- n := 4
- if r == 'U' {
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
n = 8
}
if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
- }
-
- bs := make([]byte, n/2)
- for i := 0; i < n; i += 2 {
- a, ok1 := unhex(s[i])
- b, ok2 := unhex(s[i+1])
- if !ok1 || !ok2 {
- return "", "", errBadHex
- }
- bs[i/2] = a<<4 | b
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
+ ss := s[:n]
s = s[n:]
- return string(bs), s, nil
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
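// Editorial note, not part of the patch: with the rewritten unescape above,
// \101 and \x41 both decode to "A", \u00e9 decodes to "é", and \U0001F600
// decodes to the single rune U+1F600; escapes larger than one byte are now
// only reachable through \u and \U.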
-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
- switch {
- case '0' <= b && b <= '9':
- return b - '0', true
- case 'a' <= b && b <= 'f':
- return b - 'a' + 10, true
- case 'A' <= b && b <= 'F':
- return b - 'A' + 10, true
- }
- return 0, false
-}
-
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
@@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
}
return strings.Join(parts, ""), nil
}
@@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(x)
+ fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
@@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
- err := um.UnmarshalText([]byte(s))
- return err
+ return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
- if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
- return pe
- }
- return nil
+ return newTextParser(s).readStruct(v.Elem(), "")
}
diff --git a/vendor/github.com/jtolds/gls/README.md b/vendor/github.com/jtolds/gls/README.md
deleted file mode 100644
index 4ebb692f..00000000
--- a/vendor/github.com/jtolds/gls/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-gls
-===
-
-Goroutine local storage
-
-### IMPORTANT NOTE ###
-
-It is my duty to point you to https://blog.golang.org/context, which is how
-Google solves all of the problems you'd perhaps consider using this package
-for at scale.
-
-One downside to Google's approach is that *all* of your functions must have
-a new first argument, but after clearing that hurdle everything else is much
-better.
-
-If you aren't interested in this warning, read on.
-
-### Huhwaht? Why? ###
-
-Every so often, a thread shows up on the
-[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some
-form of goroutine-local-storage, or some kind of goroutine id, or some kind of
-context. There are a few valid use cases for goroutine-local-storage, one of
-the most prominent being log line context. One poster was interested in being
-able to log an HTTP request context id in every log line in the same goroutine
-as the incoming HTTP request, without having to change every library and
-function call he was interested in logging.
-
-This would be pretty useful. Provided that you could get some kind of
-goroutine-local-storage, you could call
-[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging
-writer that checks goroutine-local-storage for some context information and
-adds that context to your log lines.
-
-But alas, Andrew Gerrand's typically diplomatic answer to the question of
-goroutine-local variables was:
-
-> We wouldn't even be having this discussion if thread local storage wasn't
-> useful. But every feature comes at a cost, and in my opinion the cost of
-> threadlocals far outweighs their benefits. They're just not a good fit for
-> Go.
-
-So, yeah, that makes sense. That's a pretty good reason for why the language
-won't support a specific and (relatively) unuseful feature that requires some
-runtime changes, just for the sake of a little bit of log improvement.
-
-But does Go require runtime changes?
-
-### How it works ###
-
-Go has pretty fantastic introspective and reflective features, but one thing Go
-doesn't give you is any kind of access to the stack pointer, or frame pointer,
-or goroutine id, or anything contextual about your current stack. It gives you
-access to your list of callers, but only along with program counters, which are
-fixed at compile time.
-
-But it does give you the stack.
-
-So, we define 16 special functions and embed base-16 tags into the stack using
-the call order of those 16 functions. Then, we can read our tags back out of
-the stack looking at the callers list.
-
-We then use these tags as an index into a traditional map for implementing
-this library.
-
-### What are people saying? ###
-
-"Wow, that's horrifying."
-
-"This is the most terrible thing I have seen in a very long time."
-
-"Where is it getting a context from? Is this serializing all the requests?
-What the heck is the client being bound to? What are these tags? Why does he
-need callers? Oh god no. No no no."
-
-### Docs ###
-
-Please see the docs at http://godoc.org/github.com/jtolds/gls
-
-### Related ###
-
-If you're okay relying on the string format of the current runtime stacktrace
-including a unique goroutine id (not guaranteed by the spec or anything, but
-very unlikely to change within a Go release), you might be able to squeeze
-out a bit more performance by using this similar library, inspired by some
-code Brad Fitzpatrick wrote for debugging his HTTP/2 library:
-https://github.com/tylerb/gls (in contrast, jtolds/gls doesn't require
-any knowledge of the string format of the runtime stacktrace, which
-probably adds unnecessary overhead).
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
deleted file mode 100644
index 81be2143..00000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
- go test -cover -v -coverprofile=cover.dat ./...
- go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/percona/exporter_shared/NOTICE b/vendor/github.com/percona/exporter_shared/NOTICE
deleted file mode 100644
index d3b84271..00000000
--- a/vendor/github.com/percona/exporter_shared/NOTICE
+++ /dev/null
@@ -1,2 +0,0 @@
-Shared code for Percona Prometheus exporters.
-Copyright 2017 Percona LLC
diff --git a/vendor/github.com/percona/exporter_shared/helpers/helpers.go b/vendor/github.com/percona/exporter_shared/helpers/helpers.go
deleted file mode 100644
index 6c15b900..00000000
--- a/vendor/github.com/percona/exporter_shared/helpers/helpers.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2017 Percona LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package helpers provides test helpers for Prometheus exporters.
-//
-// It contains workarounds for the following issues:
-// * https://github.com/prometheus/client_golang/issues/322
-// * https://github.com/prometheus/client_golang/issues/323
-package helpers
-
-import (
- "fmt"
- "regexp"
-
- "github.com/prometheus/client_golang/prometheus"
- dto "github.com/prometheus/client_model/go"
-)
-
-var nameAndHelpRE = regexp.MustCompile(`fqName: "(\w+)", help: "([^"]+)"`)
-
-func getNameAndHelp(d *prometheus.Desc) (string, string) {
- m := nameAndHelpRE.FindStringSubmatch(d.String())
- if len(m) != 3 {
- panic(fmt.Sprintf("failed to get metric name and help from %#q: %#v", d.String(), m))
- }
- return m[1], m[2]
-}
-
-// Metric contains Prometheus metric details.
-type Metric struct {
- Name string
- Help string
- Labels prometheus.Labels
- Type dto.MetricType
- Value float64
-}
-
-// ReadMetric extracts details from Prometheus metric.
-func ReadMetric(m prometheus.Metric) *Metric {
- pb := &dto.Metric{}
- if err := m.Write(pb); err != nil {
- panic(err)
- }
-
- name, help := getNameAndHelp(m.Desc())
- labels := make(prometheus.Labels, len(pb.Label))
- for _, v := range pb.Label {
- labels[v.GetName()] = v.GetValue()
- }
- if pb.Gauge != nil {
- return &Metric{name, help, labels, dto.MetricType_GAUGE, pb.GetGauge().GetValue()}
- }
- if pb.Counter != nil {
- return &Metric{name, help, labels, dto.MetricType_COUNTER, pb.GetCounter().GetValue()}
- }
- if pb.Untyped != nil {
- return &Metric{name, help, labels, dto.MetricType_UNTYPED, pb.GetUntyped().GetValue()}
- }
- panic("Unsupported metric type")
-}
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
new file mode 100644
index 00000000..c5275d5a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md
@@ -0,0 +1,18 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Bernerd Schaefer
+* Björn Rabenstein
+* Daniel Bornkessel
+* Jeff Younker
+* Julius Volz
+* Matt T. Proud
+* Tobias Schmidt
+
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
deleted file mode 100644
index 44986bff..00000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ /dev/null
@@ -1 +0,0 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 3116d2f7..b6dd5a26 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -76,7 +76,7 @@ func Handler() http.Handler {
}
// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// ef the Handler is defined by the provided HandlerOpts.
+// of the Handler is defined by the provided HandlerOpts.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := reg.Gather()
diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE
new file mode 100644
index 00000000..11069edd
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
deleted file mode 100644
index 7723656d..00000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-PACKAGE
-
-package goautoneg
-import "bitbucket.org/ww/goautoneg"
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-FUNCTIONS
-
-func Negotiate(header string, alternatives []string) (content_type string)
-Negotiate the most appropriate content_type given the accept header
-and a list of alternatives.
-
-func ParseAccept(header string) (accept []Accept)
-Parse an Accept Header string returning a sorted list
-of clauses
-
-
-TYPES
-
-type Accept struct {
- Type, SubType string
- Q float32
- Params map[string]string
-}
-Structure to represent a clause in an HTTP Accept Header
-
-
-SUBDIRECTORIES
-
- .hg
diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
index 6d41284c..bcf68e6f 100644
--- a/vendor/github.com/prometheus/common/log/eventlog_formatter.go
+++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go
@@ -21,22 +21,22 @@ import (
"golang.org/x/sys/windows/svc/eventlog"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
func init() {
- setEventlogFormatter = func(name string, debugAsInfo bool) error {
+ setEventlogFormatter = func(l logger, name string, debugAsInfo bool) error {
if name == "" {
return fmt.Errorf("missing name parameter")
}
- fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter)
+ fmter, err := newEventlogger(name, debugAsInfo, l.entry.Logger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err)
- origLogger.Errorf("can't connect logger to eventlog: %v", err)
+ l.Errorf("can't connect logger to eventlog: %v", err)
return err
}
- origLogger.Formatter = fmter
+ l.entry.Logger.Formatter = fmter
return nil
}
}
diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go
index efad4842..1321741a 100644
--- a/vendor/github.com/prometheus/common/log/log.go
+++ b/vendor/github.com/prometheus/common/log/log.go
@@ -25,14 +25,14 @@ import (
"strconv"
"strings"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
type levelFlag string
// String implements flag.Value.
func (f levelFlag) String() string {
- return fmt.Sprintf("%q", string(f))
+ return fmt.Sprintf("%q", origLogger.Level.String())
}
// Set implements flag.Value.
@@ -46,10 +46,10 @@ func (f levelFlag) Set(level string) error {
}
// setSyslogFormatter is nil if the target architecture does not support syslog.
-var setSyslogFormatter func(string, string) error
+var setSyslogFormatter func(logger, string, string) error
// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).
-var setEventlogFormatter func(string, bool) error
+var setEventlogFormatter func(logger, string, bool) error
func setJSONFormatter() {
origLogger.Formatter = &logrus.JSONFormatter{}
@@ -65,45 +65,7 @@ func (f logFormatFlag) String() string {
// Set implements flag.Value.
func (f logFormatFlag) Set(format string) error {
- u, err := url.Parse(format)
- if err != nil {
- return err
- }
- if u.Scheme != "logger" {
- return fmt.Errorf("invalid scheme %s", u.Scheme)
- }
- jsonq := u.Query().Get("json")
- if jsonq == "true" {
- setJSONFormatter()
- }
-
- switch u.Opaque {
- case "syslog":
- if setSyslogFormatter == nil {
- return fmt.Errorf("system does not support syslog")
- }
- appname := u.Query().Get("appname")
- facility := u.Query().Get("local")
- return setSyslogFormatter(appname, facility)
- case "eventlog":
- if setEventlogFormatter == nil {
- return fmt.Errorf("system does not support eventlog")
- }
- name := u.Query().Get("name")
- debugAsInfo := false
- debugAsInfoRaw := u.Query().Get("debugAsInfo")
- if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
- debugAsInfo = parsedDebugAsInfo
- }
- return setEventlogFormatter(name, debugAsInfo)
- case "stdout":
- origLogger.Out = os.Stdout
- case "stderr":
- origLogger.Out = os.Stderr
- default:
- return fmt.Errorf("unsupported logger %q", u.Opaque)
- }
- return nil
+ return baseLogger.SetFormat(format)
}
func init() {
@@ -150,6 +112,9 @@ type Logger interface {
Fatalf(string, ...interface{})
With(key string, value interface{}) Logger
+
+ SetFormat(string) error
+ SetLevel(string) error
}
type logger struct {
@@ -235,6 +200,58 @@ func (l logger) Fatalf(format string, args ...interface{}) {
l.sourced().Fatalf(format, args...)
}
+func (l logger) SetLevel(level string) error {
+ lvl, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+
+ l.entry.Logger.Level = lvl
+ return nil
+}
+
+func (l logger) SetFormat(format string) error {
+ u, err := url.Parse(format)
+ if err != nil {
+ return err
+ }
+ if u.Scheme != "logger" {
+ return fmt.Errorf("invalid scheme %s", u.Scheme)
+ }
+ jsonq := u.Query().Get("json")
+ if jsonq == "true" {
+ setJSONFormatter()
+ }
+
+ switch u.Opaque {
+ case "syslog":
+ if setSyslogFormatter == nil {
+ return fmt.Errorf("system does not support syslog")
+ }
+ appname := u.Query().Get("appname")
+ facility := u.Query().Get("local")
+ return setSyslogFormatter(l, appname, facility)
+ case "eventlog":
+ if setEventlogFormatter == nil {
+ return fmt.Errorf("system does not support eventlog")
+ }
+ name := u.Query().Get("name")
+ debugAsInfo := false
+ debugAsInfoRaw := u.Query().Get("debugAsInfo")
+ if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {
+ debugAsInfo = parsedDebugAsInfo
+ }
+ return setEventlogFormatter(l, name, debugAsInfo)
+ case "stdout":
+ l.entry.Logger.Out = os.Stdout
+ case "stderr":
+ l.entry.Logger.Out = os.Stderr
+ default:
+ return fmt.Errorf("unsupported logger %q", u.Opaque)
+ }
+ return nil
+}
+
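// Editorial note, not part of the patch: SetFormat takes a "logger:" URL.
// Based on the switch above, values such as
//
//	"logger:stderr"
//	"logger:stdout?json=true"
//	"logger:syslog?appname=myapp&local=local0"
//	"logger:eventlog?name=myapp&debugAsInfo=true"
//
// are accepted; myapp and local0 are placeholder values here.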
// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
func (l logger) sourced() *logrus.Entry {
diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go
index 64f5fdac..f882f2f8 100644
--- a/vendor/github.com/prometheus/common/log/syslog_formatter.go
+++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go
@@ -20,13 +20,13 @@ import (
"log/syslog"
"os"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var _ logrus.Formatter = (*syslogger)(nil)
func init() {
- setSyslogFormatter = func(appname, local string) error {
+ setSyslogFormatter = func(l logger, appname, local string) error {
if appname == "" {
return fmt.Errorf("missing appname parameter")
}
@@ -34,13 +34,13 @@ func init() {
return fmt.Errorf("missing local parameter")
}
- fmter, err := newSyslogger(appname, local, origLogger.Formatter)
+ fmter, err := newSyslogger(appname, local, l.entry.Logger.Formatter)
if err != nil {
fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err)
- origLogger.Errorf("can't connect logger to syslog: %v", err)
+ l.entry.Errorf("can't connect logger to syslog: %v", err)
return err
}
- origLogger.Formatter = fmter
+ l.entry.Logger.Formatter = fmter
return nil
}
}
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
deleted file mode 100644
index 0c802dd8..00000000
--- a/vendor/github.com/prometheus/procfs/AUTHORS.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Tobias Schmidt
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Armen Baghumian
-* Bjoern Rabenstein
-* David Cournapeau
-* Ji-Hoon, Seol
-* Jonas Große Sundrup
-* Julius Volz
-* Matthias Rampke
-* Nicky Gerritsen
-* Rémi Audebert
-* Tobias Schmidt
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
deleted file mode 100644
index 5705f0fb..00000000
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Contributing
-
-Prometheus uses GitHub to manage reviews of pull requests.
-
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
-
-* If you plan to do something more involved, first discuss your ideas
- on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
- This will avoid unnecessary work and surely give you and us a good deal
- of inspiration.
-
-* Relevant coding style guidelines are the [Go Code Review
- Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
- and the _Formatting and style_ section of Peter Bourgon's [Go: Best
- Practices for Production
- Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
deleted file mode 100644
index c264a49d..00000000
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-ci:
- ! gofmt -l *.go | read nothing
- go vet
- go test -v ./...
- go get github.com/golang/lint/golint
- golint *.go
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
deleted file mode 100644
index 6e7ee6b8..00000000
--- a/vendor/github.com/prometheus/procfs/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# procfs
-
-This procfs package provides functions to retrieve system, kernel and process
-metrics from the pseudo-filesystem proc.
-
-*WARNING*: This package is a work in progress. Its API may still break in
-backwards-incompatible ways without warnings. Use it at your own risk.
-
-[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
-[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 00000000..d3a82680
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is comprised of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
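A short usage sketch for the new buddyinfo API; the printed fields come straight from the BuddyInfo struct above, and the error handling is illustrative only.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewBuddyInfo parses /proc/buddyinfo via the default mount point.
	infos, err := procfs.NewBuddyInfo()
	if err != nil {
		fmt.Println("reading buddyinfo:", err)
		return
	}

	for _, bi := range infos {
		// Sizes[n] is the number of free blocks of 2^n pages in this node/zone.
		fmt.Printf("node %s, zone %s: %d orders, free blocks per order: %v\n",
			bi.Node, bi.Zone, len(bi.Sizes), bi.Sizes)
	}
}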
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 49aaab05..b6c6b2ce 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -1,9 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"fmt"
"os"
"path"
+
+ "github.com/prometheus/procfs/nfs"
+ "github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+ f, err := os.Open(fs.Path("fs/xfs/stat"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfsd"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseServerRPCStats(f)
+}
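The new FS methods expose XFS and NFS RPC statistics when the corresponding proc files exist; on kernels without them the underlying Open call simply fails. A minimal sketch, using only field names defined in the nfs package added elsewhere in this change:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually "/proc"
	if err != nil {
		fmt.Println("opening procfs:", err)
		return
	}

	// Present only when the kernel exposes /proc/fs/xfs/stat.
	if _, err := fs.XFSStats(); err == nil {
		fmt.Println("XFS runtime statistics are available")
	}

	// Present only when NFS client statistics exist under /proc/net/rpc/nfs.
	if nfsStats, err := fs.NFSClientRPCStats(); err == nil {
		fmt.Println("NFS client RPC calls:", nfsStats.ClientRPC.RPCCount)
	}
}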
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 00000000..1ad21c91
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
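Because these helpers live under internal/, they can only be imported from inside the procfs module itself; a test-style sketch of their behavior:

package util_test

import (
	"testing"

	"github.com/prometheus/procfs/internal/util"
)

func TestParseHelpers(t *testing.T) {
	vals, err := util.ParseUint64s([]string{"1", "2", "42"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vals) != 3 || vals[2] != 42 {
		t.Fatalf("unexpected result: %v", vals)
	}

	// 2^32 does not fit in a uint32, so ParseUint32s reports a range error.
	if _, err := util.ParseUint32s([]string{"4294967296"}); err == nil {
		t.Fatal("expected an out-of-range error")
	}
}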
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index e7012f73..e36d4a3b 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -31,14 +44,16 @@ type IPVSStats struct {
type IPVSBackendStatus struct {
// The local (virtual) IP address.
LocalAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The transport protocol (TCP, UDP).
- Proto string
// The remote (real) IP address.
RemoteAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
// The remote (real) port.
RemotePort uint16
+ // The local firewall mark
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
// The current number of active connections for this virtual/real address pair.
ActiveConn uint64
// The current number of inactive connections for this virtual/real address pair.
@@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status []IPVSBackendStatus
scanner = bufio.NewScanner(file)
proto string
+ localMark string
localAddress net.IP
localPort uint16
err error
)
for scanner.Scan() {
- fields := strings.Fields(string(scanner.Text()))
+ fields := strings.Fields(scanner.Text())
if len(fields) == 0 {
continue
}
@@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
continue
}
proto = fields[0]
+ localMark = ""
localAddress, localPort, err = parseIPPort(fields[1])
if err != nil {
return nil, err
}
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
case fields[0] == "->":
if len(fields) < 6 {
continue
@@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status = append(status, IPVSBackendStatus{
LocalAddress: localAddress,
LocalPort: localPort,
+ LocalMark: localMark,
RemoteAddress: remoteAddress,
RemotePort: remotePort,
Proto: proto,
@@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
}
func parseIPPort(s string) (net.IP, uint16, error) {
- tmp := strings.SplitN(s, ":", 2)
-
- if len(tmp) != 2 {
- return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
- }
+ var (
+ ip net.IP
+ err error
+ )
- if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
- return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
}
- ip, err := hex.DecodeString(tmp[0])
- if err != nil {
- return nil, 0, err
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
}
-
- port, err := strconv.ParseUint(tmp[1], 16, 16)
+ port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
return nil, 0, err
}
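The reworked parser also recognizes firewall-mark (FWM) virtual services, which carry a LocalMark but no local address or port. A sketch of consuming the result through the package-level NewIPVSBackendStatus helper (assumed here; it wraps this parser in the upstream package but is not part of this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	backends, err := procfs.NewIPVSBackendStatus()
	if err != nil {
		fmt.Println("reading IPVS backend status:", err)
		return
	}

	for _, b := range backends {
		if b.LocalMark != "" {
			// FWM services have no local address/port, only a mark.
			fmt.Printf("FWM %s -> %s:%d (%d active)\n",
				b.LocalMark, b.RemoteAddress, b.RemotePort, b.ActiveConn)
			continue
		}
		fmt.Printf("%s %s:%d -> %s:%d (%d active)\n",
			b.Proto, b.LocalAddress, b.LocalPort,
			b.RemoteAddress, b.RemotePort, b.ActiveConn)
	}
}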
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index d7a248c0..9dc19583 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 00000000..e95ddbc6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,569 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10Len = 10
+ fieldTransport11Len = 13
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+ // If available additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+ // between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[2:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+ // When encountering "per-operation statistics", we must break this
+ // loop and parse them separately to ensure we can terminate parsing
+ // before reaching another device entry, which is why this 'if' statement
+ // is not just another switch case
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ switch statVersion {
+ case statVersion10:
+ if len(ss) != fieldTransport10Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ if len(ss) != fieldTransport11Len {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11Len)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ return &NFSTransportStats{
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
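Mount statistics are reached through Proc.MountStats, added to proc.go later in this diff; NFS mounts attach a *MountStatsNFS behind the MountStats interface, so callers type-assert to reach the details. A sketch assuming the package's Self() helper for the current process:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		fmt.Println("opening /proc/self:", err)
		return
	}

	mounts, err := p.MountStats()
	if err != nil {
		fmt.Println("reading mountstats:", err)
		return
	}

	for _, m := range mounts {
		// Only NFSv3/v4 mounts carry detailed statistics.
		stats, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		fmt.Printf("%s on %s: age %v, %d bytes read, %d per-op entries\n",
			m.Device, m.Mount, stats.Age, stats.Bytes.ReadTotal, len(stats.Operations))
	}
}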
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 00000000..3f252337
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,216 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+ Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
+ RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+ RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
+ RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+ RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
+ RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
+ TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
+ TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
+ TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
+ TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
+ TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+ TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
+ TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func NewNetDev() (NetDev, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewNetDev()
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NewNetDev() (NetDev, error) {
+ return newNetDev(fs.Path("net/dev"))
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NewNetDev() (NetDev, error) {
+ return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return NetDev{}, err
+ }
+ defer f.Close()
+
+ nd := NetDev{}
+ s := bufio.NewScanner(f)
+ for n := 0; s.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line, err := nd.parseLine(s.Text())
+ if err != nil {
+ return nd, err
+ }
+
+ nd[line.Name] = *line
+ }
+
+ return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+ parts := strings.SplitN(rawLine, ":", 2)
+ if len(parts) != 2 {
+ return nil, errors.New("invalid net/dev line, missing colon")
+ }
+ fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+ var err error
+ line := &NetDevLine{}
+
+ // Interface Name
+ line.Name = strings.TrimSpace(parts[0])
+ if line.Name == "" {
+ return nil, errors.New("invalid net/dev line, empty interface name")
+ }
+
+ // RX
+ line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // TX
+ line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(nd))
+ for _, ifc := range nd {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+ total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
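A brief sketch of reading per-interface counters and the aggregated total:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	nd, err := procfs.NewNetDev() // parses /proc/net/dev
	if err != nil {
		fmt.Println("reading net/dev:", err)
		return
	}

	for name, line := range nd {
		fmt.Printf("%-10s rx=%d bytes, tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
	}

	// Total sums the counters across all interfaces and joins their names.
	total := nd.Total()
	fmt.Printf("total (%s): rx=%d, tx=%d\n", total.Name, total.RxBytes, total.TxBytes)
}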
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 00000000..651bf681
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfsd and /proc/net/rpc/nfs.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+ Hits uint64
+ Misses uint64
+ NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+ Stale uint64
+ TotalLookups uint64
+ AnonLookups uint64
+ DirNoCache uint64
+ NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+ Read uint64
+ Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+ Threads uint64
+ FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+ CacheSize uint64
+ CacheHistogram []uint64
+ NotFound uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+ NetCount uint64
+ UDPCount uint64
+ TCPCount uint64
+ TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+ RPCCount uint64
+ Retransmissions uint64
+ AuthRefreshes uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+ RPCCount uint64
+ BadCnt uint64
+ BadFmt uint64
+ BadAuth uint64
+ BadcInt uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Root uint64
+ Lookup uint64
+ ReadLink uint64
+ Read uint64
+ WrCache uint64
+ Write uint64
+ Create uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ SymLink uint64
+ MkDir uint64
+ RmDir uint64
+ ReadDir uint64
+ FsStat uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Lookup uint64
+ Access uint64
+ ReadLink uint64
+ Read uint64
+ Write uint64
+ Create uint64
+ MkDir uint64
+ SymLink uint64
+ MkNod uint64
+ Remove uint64
+ RmDir uint64
+ Rename uint64
+ Link uint64
+ ReadDir uint64
+ ReadDirPlus uint64
+ FsStat uint64
+ FsInfo uint64
+ PathConf uint64
+ Commit uint64
+}
+
+// ClientV4Stats models the nfs "proc4" line.
+type ClientV4Stats struct {
+ Null uint64
+ Read uint64
+ Write uint64
+ Commit uint64
+ Open uint64
+ OpenConfirm uint64
+ OpenNoattr uint64
+ OpenDowngrade uint64
+ Close uint64
+ Setattr uint64
+ FsInfo uint64
+ Renew uint64
+ SetClientID uint64
+ SetClientIDConfirm uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Access uint64
+ Getattr uint64
+ Lookup uint64
+ LookupRoot uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ Symlink uint64
+ Create uint64
+ Pathconf uint64
+ StatFs uint64
+ ReadLink uint64
+ ReadDir uint64
+ ServerCaps uint64
+ DelegReturn uint64
+ GetACL uint64
+ SetACL uint64
+ FsLocations uint64
+ ReleaseLockowner uint64
+ Secinfo uint64
+ FsidPresent uint64
+ ExchangeID uint64
+ CreateSession uint64
+ DestroySession uint64
+ Sequence uint64
+ GetLeaseTime uint64
+ ReclaimComplete uint64
+ LayoutGet uint64
+ GetDeviceInfo uint64
+ LayoutCommit uint64
+ LayoutReturn uint64
+ SecinfoNoName uint64
+ TestStateID uint64
+ FreeStateID uint64
+ GetDeviceList uint64
+ BindConnToSession uint64
+ DestroyClientID uint64
+ Seek uint64
+ Allocate uint64
+ DeAllocate uint64
+ LayoutStats uint64
+ Clone uint64
+}
+
+// ServerV4Stats models the nfsd "proc4" line.
+type ServerV4Stats struct {
+ Null uint64
+ Compound uint64
+}
+
+// V4Ops models the "proc4ops" line: NFSv4 operations
+// Variable list, see:
+// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
+// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
+// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
+type V4Ops struct {
+ //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
+ Op0Unused uint64
+ Op1Unused uint64
+ Op2Future uint64
+ Access uint64
+ Close uint64
+ Commit uint64
+ Create uint64
+ DelegPurge uint64
+ DelegReturn uint64
+ GetAttr uint64
+ GetFH uint64
+ Link uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Lookup uint64
+ LookupRoot uint64
+ Nverify uint64
+ Open uint64
+ OpenAttr uint64
+ OpenConfirm uint64
+ OpenDgrd uint64
+ PutFH uint64
+ PutPubFH uint64
+ PutRootFH uint64
+ Read uint64
+ ReadDir uint64
+ ReadLink uint64
+ Remove uint64
+ Rename uint64
+ Renew uint64
+ RestoreFH uint64
+ SaveFH uint64
+ SecInfo uint64
+ SetAttr uint64
+ Verify uint64
+ Write uint64
+ RelLockOwner uint64
+}
+
+// ClientRPCStats models all stats from /proc/net/rpc/nfs.
+type ClientRPCStats struct {
+ Network Network
+ ClientRPC ClientRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ClientV4Stats ClientV4Stats
+}
+
+// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
+type ServerRPCStats struct {
+ ReplyCache ReplyCache
+ FileHandles FileHandles
+ InputOutput InputOutput
+ Threads Threads
+ ReadAheadCache ReadAheadCache
+ Network Network
+ ServerRPC ServerRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ServerV4Stats ServerV4Stats
+ V4Ops V4Ops
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go
new file mode 100644
index 00000000..95a83cc5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "fmt"
+)
+
+func parseReplyCache(v []uint64) (ReplyCache, error) {
+ if len(v) != 3 {
+ return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
+ }
+
+ return ReplyCache{
+ Hits: v[0],
+ Misses: v[1],
+ NoCache: v[2],
+ }, nil
+}
+
+func parseFileHandles(v []uint64) (FileHandles, error) {
+ if len(v) != 5 {
+ return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
+ }
+
+ return FileHandles{
+ Stale: v[0],
+ TotalLookups: v[1],
+ AnonLookups: v[2],
+ DirNoCache: v[3],
+ NoDirNoCache: v[4],
+ }, nil
+}
+
+func parseInputOutput(v []uint64) (InputOutput, error) {
+ if len(v) != 2 {
+ return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
+ }
+
+ return InputOutput{
+ Read: v[0],
+ Write: v[1],
+ }, nil
+}
+
+func parseThreads(v []uint64) (Threads, error) {
+ if len(v) != 2 {
+ return Threads{}, fmt.Errorf("invalid Threads line %q", v)
+ }
+
+ return Threads{
+ Threads: v[0],
+ FullCnt: v[1],
+ }, nil
+}
+
+func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
+ if len(v) != 12 {
+ return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
+ }
+
+ return ReadAheadCache{
+ CacheSize: v[0],
+ CacheHistogram: v[1:11],
+ NotFound: v[11],
+ }, nil
+}
+
+func parseNetwork(v []uint64) (Network, error) {
+ if len(v) != 4 {
+ return Network{}, fmt.Errorf("invalid Network line %q", v)
+ }
+
+ return Network{
+ NetCount: v[0],
+ UDPCount: v[1],
+ TCPCount: v[2],
+ TCPConnect: v[3],
+ }, nil
+}
+
+func parseServerRPC(v []uint64) (ServerRPC, error) {
+ if len(v) != 5 {
+ return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ServerRPC{
+ RPCCount: v[0],
+ BadCnt: v[1],
+ BadFmt: v[2],
+ BadAuth: v[3],
+ BadcInt: v[4],
+ }, nil
+}
+
+func parseClientRPC(v []uint64) (ClientRPC, error) {
+ if len(v) != 3 {
+ return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ClientRPC{
+ RPCCount: v[0],
+ Retransmissions: v[1],
+ AuthRefreshes: v[2],
+ }, nil
+}
+
+func parseV2Stats(v []uint64) (V2Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 18 {
+ return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
+ }
+
+ return V2Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Root: v[4],
+ Lookup: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ WrCache: v[8],
+ Write: v[9],
+ Create: v[10],
+ Remove: v[11],
+ Rename: v[12],
+ Link: v[13],
+ SymLink: v[14],
+ MkDir: v[15],
+ RmDir: v[16],
+ ReadDir: v[17],
+ FsStat: v[18],
+ }, nil
+}
+
+func parseV3Stats(v []uint64) (V3Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 22 {
+ return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
+ }
+
+ return V3Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Lookup: v[4],
+ Access: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ Write: v[8],
+ Create: v[9],
+ MkDir: v[10],
+ SymLink: v[11],
+ MkNod: v[12],
+ Remove: v[13],
+ RmDir: v[14],
+ Rename: v[15],
+ Link: v[16],
+ ReadDir: v[17],
+ ReadDirPlus: v[18],
+ FsStat: v[19],
+ FsInfo: v[20],
+ PathConf: v[21],
+ Commit: v[22],
+ }, nil
+}
+
+func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values {
+ return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
+ }
+
+ // This function currently supports mapping 59 NFS v4 client stats. Older
+ // kernels may emit fewer stats, so we must detect this and pad out the
+ // values to match the expected slice size.
+ if values < 59 {
+ newValues := make([]uint64, 60)
+ copy(newValues, v)
+ v = newValues
+ }
+
+ return ClientV4Stats{
+ Null: v[1],
+ Read: v[2],
+ Write: v[3],
+ Commit: v[4],
+ Open: v[5],
+ OpenConfirm: v[6],
+ OpenNoattr: v[7],
+ OpenDowngrade: v[8],
+ Close: v[9],
+ Setattr: v[10],
+ FsInfo: v[11],
+ Renew: v[12],
+ SetClientID: v[13],
+ SetClientIDConfirm: v[14],
+ Lock: v[15],
+ Lockt: v[16],
+ Locku: v[17],
+ Access: v[18],
+ Getattr: v[19],
+ Lookup: v[20],
+ LookupRoot: v[21],
+ Remove: v[22],
+ Rename: v[23],
+ Link: v[24],
+ Symlink: v[25],
+ Create: v[26],
+ Pathconf: v[27],
+ StatFs: v[28],
+ ReadLink: v[29],
+ ReadDir: v[30],
+ ServerCaps: v[31],
+ DelegReturn: v[32],
+ GetACL: v[33],
+ SetACL: v[34],
+ FsLocations: v[35],
+ ReleaseLockowner: v[36],
+ Secinfo: v[37],
+ FsidPresent: v[38],
+ ExchangeID: v[39],
+ CreateSession: v[40],
+ DestroySession: v[41],
+ Sequence: v[42],
+ GetLeaseTime: v[43],
+ ReclaimComplete: v[44],
+ LayoutGet: v[45],
+ GetDeviceInfo: v[46],
+ LayoutCommit: v[47],
+ LayoutReturn: v[48],
+ SecinfoNoName: v[49],
+ TestStateID: v[50],
+ FreeStateID: v[51],
+ GetDeviceList: v[52],
+ BindConnToSession: v[53],
+ DestroyClientID: v[54],
+ Seek: v[55],
+ Allocate: v[56],
+ DeAllocate: v[57],
+ LayoutStats: v[58],
+ Clone: v[59],
+ }, nil
+}
+
+func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 2 {
+ return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
+ }
+
+ return ServerV4Stats{
+ Null: v[1],
+ Compound: v[2],
+ }, nil
+}
+
+func parseV4Ops(v []uint64) (V4Ops, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values < 39 {
+ return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
+ }
+
+ stats := V4Ops{
+ Op0Unused: v[1],
+ Op1Unused: v[2],
+ Op2Future: v[3],
+ Access: v[4],
+ Close: v[5],
+ Commit: v[6],
+ Create: v[7],
+ DelegPurge: v[8],
+ DelegReturn: v[9],
+ GetAttr: v[10],
+ GetFH: v[11],
+ Link: v[12],
+ Lock: v[13],
+ Lockt: v[14],
+ Locku: v[15],
+ Lookup: v[16],
+ LookupRoot: v[17],
+ Nverify: v[18],
+ Open: v[19],
+ OpenAttr: v[20],
+ OpenConfirm: v[21],
+ OpenDgrd: v[22],
+ PutFH: v[23],
+ PutPubFH: v[24],
+ PutRootFH: v[25],
+ Read: v[26],
+ ReadDir: v[27],
+ ReadLink: v[28],
+ Remove: v[29],
+ Rename: v[30],
+ Renew: v[31],
+ RestoreFH: v[32],
+ SaveFH: v[33],
+ SecInfo: v[34],
+ SetAttr: v[35],
+ Verify: v[36],
+ Write: v[37],
+ RelLockOwner: v[38],
+ }
+
+ return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
new file mode 100644
index 00000000..c0d3a5ad
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+ stats := &ClientRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least a label and one value
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFS metric line %q", line)
+ }
+
+ values, err := util.ParseUint64s(parts[1:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ClientRPC, err = parseClientRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ClientV4Stats, err = parseClientV4Stats(values)
+ default:
+ return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFS file: %s", err)
+ }
+
+ return stats, nil
+}
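ParseClientRPCStats takes any io.Reader, so it can be exercised against a trimmed sample; the numbers below are illustrative, not real measurements, and real files also contain proc2/proc3/proc4 lines.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	const sample = `net 18628 0 18628 6
rpc 4329785 0 4338291
`

	stats, err := nfs.ParseClientRPCStats(strings.NewReader(sample))
	if err != nil {
		fmt.Println("parsing nfs stats:", err)
		return
	}

	fmt.Println("RPC calls:", stats.ClientRPC.RPCCount)
	fmt.Println("retransmissions:", stats.ClientRPC.Retransmissions)
}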
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 00000000..57bb4a35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+ stats := &ServerRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least a label and one value
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+ }
+ label := parts[0]
+
+ var values []uint64
+ var err error
+ if label == "th" {
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+ }
+ values, err = util.ParseUint64s(parts[1:3])
+ } else {
+ values, err = util.ParseUint64s(parts[1:])
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "rc":
+ stats.ReplyCache, err = parseReplyCache(values)
+ case "fh":
+ stats.FileHandles, err = parseFileHandles(values)
+ case "io":
+ stats.InputOutput, err = parseInputOutput(values)
+ case "th":
+ stats.Threads, err = parseThreads(values)
+ case "ra":
+ stats.ReadAheadCache, err = parseReadAheadCache(values)
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ServerRPC, err = parseServerRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ServerV4Stats, err = parseServerV4Stats(values)
+ case "proc4ops":
+ stats.V4Ops, err = parseV4Ops(values)
+ default:
+ return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+ }
+
+ return stats, nil
+}
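ParseServerRPCStats works the same way; note the special-casing of the th line, which is limited to its first two fields because the trailing histogram contains floats. Sample values are illustrative only.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	const sample = `rc 0 6 18622
io 157286400 72864
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
`

	stats, err := nfs.ParseServerRPCStats(strings.NewReader(sample))
	if err != nil {
		fmt.Println("parsing nfsd stats:", err)
		return
	}

	fmt.Println("reply cache hits:", stats.ReplyCache.Hits)
	fmt.Println("nfsd threads:", stats.Threads.Threads)
}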
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 0d0a6a90..7cf5b8ac 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -1,6 +1,20 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
@@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
@@ -192,6 +206,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index b4e31d7b..0251c83b 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) {
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
- if err != nil {
- return pio, err
- }
- return pio, nil
+ return pio, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 2df997ce..f04ba6fd 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -13,46 +26,46 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
- CPUTime int
+ CPUTime int64
// Maximum size of files that the process may create.
- FileSize int
+ FileSize int64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
- DataSize int
+ DataSize int64
// Maximum size of the process stack in bytes.
- StackSize int
+ StackSize int64
// Maximum size of a core file.
- CoreFileSize int
+ CoreFileSize int64
// Limit of the process's resident set in pages.
- ResidentSet int
+ ResidentSet int64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
- Processes int
+ Processes int64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
- OpenFiles int
+ OpenFiles int64
// Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int
+ LockedMemory int64
// Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int
+ AddressSpace int64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
- FileLocks int
+ FileLocks int64
// Limit of signals that may be queued for the real user ID of the calling
// process.
- PendingSignals int
+ PendingSignals int64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
- MsqqueueSize int
+ MsqqueueSize int64
// Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int
+ NicePriority int64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
- RealtimePriority int
+ RealtimePriority int64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
- RealtimeTimeout int
+ RealtimeTimeout int64
}
const (
@@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
return l, s.Err()
}
-func parseInt(s string) (int, error) {
+func parseInt(s string) (int64, error) {
if s == limitsUnlimited {
return -1, nil
}
- i, err := strconv.ParseInt(s, 10, 32)
+ i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
}
- return int(i), nil
+ return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 00000000..d06c26eb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+ Type string // Namespace type.
+ Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) NewNamespaces() (Namespaces, error) {
+ d, err := os.Open(p.path("ns"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ }
+
+ ns := make(Namespaces, len(names))
+ for _, name := range names {
+ target, err := os.Readlink(p.path("ns", name))
+ if err != nil {
+ return nil, err
+ }
+
+ fields := strings.SplitN(target, ":", 2)
+ if len(fields) != 2 {
+ return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ }
+
+ typ := fields[0]
+ inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ }
+
+ ns[name] = Namespace{typ, uint32(inode)}
+ }
+
+ return ns, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 724e271b..3cf2a9f1 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 1ca217e8..61eb6b0e 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -1,17 +1,81 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"bufio"
"fmt"
+ "io"
"os"
"strconv"
"strings"
)
+// CPUStat shows how much time the cpu spend in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
// Stat represents kernel/system statistics.
type Stat struct {
// Boot time in seconds since the Epoch.
- BootTime int64
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU []CPUStat
+ // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
}
// NewStat returns kernel/system statistics read from /proc/stat.
@@ -24,33 +88,145 @@ func NewStat() (Stat, error) {
return fs.NewStat()
}
+// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// Parse a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
// NewStat returns an information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
+ // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}
defer f.Close()
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- if !strings.HasPrefix(line, "btime") {
+ stat := Stat{}
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least
+ if len(parts) < 2 {
continue
}
- fields := strings.Fields(line)
- if len(fields) != 2 {
- return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
- }
- i, err := strconv.ParseInt(fields[1], 10, 32)
- if err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ for int64(len(stat.CPU)) <= cpuID {
+ stat.CPU = append(stat.CPU, CPUStat{})
+ }
+ stat.CPU[cpuID] = cpuStat
+ }
}
- return Stat{BootTime: i}, nil
}
- if err := s.Err(); err != nil {
+
+ if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
}
- return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+ return stat, nil
}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 00000000..ffe9df50
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transportation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+ // Policy discads
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+ "couldnt parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 00000000..2bc0ef34
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+ const (
+ // Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc"
+ fieldAbt = "abt"
+ fieldBlkMap = "blk_map"
+ fieldBmbt = "bmbt"
+ fieldDir = "dir"
+ fieldTrans = "trans"
+ fieldIg = "ig"
+ fieldLog = "log"
+ fieldRw = "rw"
+ fieldAttr = "attr"
+ fieldIcluster = "icluster"
+ fieldVnodes = "vnodes"
+ fieldBuf = "buf"
+ fieldXpc = "xpc"
+
+ // Unimplemented at this time due to lack of documentation.
+ fieldPushAil = "push_ail"
+ fieldXstrat = "xstrat"
+ fieldAbtb2 = "abtb2"
+ fieldAbtc2 = "abtc2"
+ fieldBmbt2 = "bmbt2"
+ fieldIbt2 = "ibt2"
+ fieldFibt2 = "fibt2"
+ fieldQm = "qm"
+ fieldDebug = "debug"
+ )
+
+ var xfss Stats
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect at least a string label and a single integer value, ex:
+ // - abt 0
+ // - rw 1 2
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) < 2 {
+ continue
+ }
+ label := ss[0]
+
+ // Extended precision counters are uint64 values.
+ if label == fieldXpc {
+ us, err := util.ParseUint64s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+ if err != nil {
+ return nil, err
+ }
+
+ continue
+ }
+
+ // All other counters are uint32 values.
+ us, err := util.ParseUint32s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ switch label {
+ case fieldExtentAlloc:
+ xfss.ExtentAllocation, err = extentAllocationStats(us)
+ case fieldAbt:
+ xfss.AllocationBTree, err = btreeStats(us)
+ case fieldBlkMap:
+ xfss.BlockMapping, err = blockMappingStats(us)
+ case fieldBmbt:
+ xfss.BlockMapBTree, err = btreeStats(us)
+ case fieldDir:
+ xfss.DirectoryOperation, err = directoryOperationStats(us)
+ case fieldTrans:
+ xfss.Transaction, err = transactionStats(us)
+ case fieldIg:
+ xfss.InodeOperation, err = inodeOperationStats(us)
+ case fieldLog:
+ xfss.LogOperation, err = logOperationStats(us)
+ case fieldRw:
+ xfss.ReadWrite, err = readWriteStats(us)
+ case fieldAttr:
+ xfss.AttributeOperation, err = attributeOperationStats(us)
+ case fieldIcluster:
+ xfss.InodeClustering, err = inodeClusteringStats(us)
+ case fieldVnodes:
+ xfss.Vnode, err = vnodeStats(us)
+ case fieldBuf:
+ xfss.Buffer, err = bufferStats(us)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+ if l := len(us); l != 4 {
+ return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+ }
+
+ return ExtentAllocationStats{
+ ExtentsAllocated: us[0],
+ BlocksAllocated: us[1],
+ ExtentsFreed: us[2],
+ BlocksFreed: us[3],
+ }, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+ if l := len(us); l != 4 {
+ return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+ }
+
+ return BTreeStats{
+ Lookups: us[0],
+ Compares: us[1],
+ RecordsInserted: us[2],
+ RecordsDeleted: us[3],
+ }, nil
+}
+
+// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+ if l := len(us); l != 7 {
+ return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+ }
+
+ return BlockMappingStats{
+ Reads: us[0],
+ Writes: us[1],
+ Unmaps: us[2],
+ ExtentListInsertions: us[3],
+ ExtentListDeletions: us[4],
+ ExtentListLookups: us[5],
+ ExtentListCompares: us[6],
+ }, nil
+}
+
+// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+ if l := len(us); l != 4 {
+ return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+ }
+
+ return DirectoryOperationStats{
+ Lookups: us[0],
+ Creates: us[1],
+ Removes: us[2],
+ Getdents: us[3],
+ }, nil
+}
+
+// TransactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+ if l := len(us); l != 3 {
+ return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+ }
+
+ return TransactionStats{
+ Sync: us[0],
+ Async: us[1],
+ Empty: us[2],
+ }, nil
+}
+
+// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+ if l := len(us); l != 7 {
+ return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+ }
+
+ return InodeOperationStats{
+ Attempts: us[0],
+ Found: us[1],
+ Recycle: us[2],
+ Missed: us[3],
+ Duplicate: us[4],
+ Reclaims: us[5],
+ AttributeChange: us[6],
+ }, nil
+}
+
+// LogOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+ if l := len(us); l != 5 {
+ return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+ }
+
+ return LogOperationStats{
+ Writes: us[0],
+ Blocks: us[1],
+ NoInternalBuffers: us[2],
+ Force: us[3],
+ ForceSleep: us[4],
+ }, nil
+}
+
+// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+ if l := len(us); l != 2 {
+ return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+ }
+
+ return ReadWriteStats{
+ Read: us[0],
+ Write: us[1],
+ }, nil
+}
+
+// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+ if l := len(us); l != 4 {
+ return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+ }
+
+ return AttributeOperationStats{
+ Get: us[0],
+ Set: us[1],
+ Remove: us[2],
+ List: us[3],
+ }, nil
+}
+
+// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+ if l := len(us); l != 3 {
+ return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+ }
+
+ return InodeClusteringStats{
+ Iflush: us[0],
+ Flush: us[1],
+ FlushInode: us[2],
+ }, nil
+}
+
+// VnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+ // The attribute "Free" appears to not be available on older XFS
+ // stats versions. Therefore, 7 or 8 elements may appear in
+ // this slice.
+ l := len(us)
+ if l != 7 && l != 8 {
+ return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+ }
+
+ s := VnodeStats{
+ Active: us[0],
+ Allocate: us[1],
+ Get: us[2],
+ Hold: us[3],
+ Release: us[4],
+ Reclaim: us[5],
+ Remove: us[6],
+ }
+
+ // Skip adding free, unless it is present. The zero value will
+ // be used in place of an actual count.
+ if l == 7 {
+ return s, nil
+ }
+
+ s.Free = us[7]
+ return s, nil
+}
+
+// BufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+ if l := len(us); l != 9 {
+ return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+ }
+
+ return BufferStats{
+ Get: us[0],
+ Create: us[1],
+ GetLocked: us[2],
+ GetLockedWaited: us[3],
+ BusyLocked: us[4],
+ MissLocked: us[5],
+ PageRetries: us[6],
+ PageFound: us[7],
+ GetRead: us[8],
+ }, nil
+}
+
+// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+ if l := len(us); l != 3 {
+ return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+ }
+
+ return ExtendedPrecisionStats{
+ FlushBytes: us[0],
+ WriteBytes: us[1],
+ ReadBytes: us[2],
+ }, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 00000000..d86794b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+ // The name of the filesystem used to source these statistics.
+ // If empty, this indicates aggregated statistics for all XFS
+ // filesystems on the host.
+ Name string
+
+ ExtentAllocation ExtentAllocationStats
+ AllocationBTree BTreeStats
+ BlockMapping BlockMappingStats
+ BlockMapBTree BTreeStats
+ DirectoryOperation DirectoryOperationStats
+ Transaction TransactionStats
+ InodeOperation InodeOperationStats
+ LogOperation LogOperationStats
+ ReadWrite ReadWriteStats
+ AttributeOperation AttributeOperationStats
+ InodeClustering InodeClusteringStats
+ Vnode VnodeStats
+ Buffer BufferStats
+ ExtendedPrecision ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+ ExtentsAllocated uint32
+ BlocksAllocated uint32
+ ExtentsFreed uint32
+ BlocksFreed uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+ Lookups uint32
+ Compares uint32
+ RecordsInserted uint32
+ RecordsDeleted uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+ Reads uint32
+ Writes uint32
+ Unmaps uint32
+ ExtentListInsertions uint32
+ ExtentListDeletions uint32
+ ExtentListLookups uint32
+ ExtentListCompares uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+ Lookups uint32
+ Creates uint32
+ Removes uint32
+ Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+ Sync uint32
+ Async uint32
+ Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+ Attempts uint32
+ Found uint32
+ Recycle uint32
+ Missed uint32
+ Duplicate uint32
+ Reclaims uint32
+ AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+ Writes uint32
+ Blocks uint32
+ NoInternalBuffers uint32
+ Force uint32
+ ForceSleep uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+ Read uint32
+ Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+ Get uint32
+ Set uint32
+ Remove uint32
+ List uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+ Iflush uint32
+ Flush uint32
+ FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+ Active uint32
+ Allocate uint32
+ Get uint32
+ Hold uint32
+ Release uint32
+ Reclaim uint32
+ Remove uint32
+ Free uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+ Get uint32
+ Create uint32
+ GetLocked uint32
+ GetLockedWaited uint32
+ BusyLocked uint32
+ MissLocked uint32
+ PageRetries uint32
+ PageFound uint32
+ GetRead uint32
+}
+
+// ExtendedPrecisionStats contains high precision counters used to track the
+// total number of bytes read, written, or flushed, during XFS operations.
+type ExtendedPrecisionStats struct {
+ FlushBytes uint64
+ WriteBytes uint64
+ ReadBytes uint64
+}
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/LICENSE
rename to vendor/github.com/sirupsen/logrus/LICENSE
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/alt_exit.go
rename to vendor/github.com/sirupsen/logrus/alt_exit.go
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/doc.go
rename to vendor/github.com/sirupsen/logrus/doc.go
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
similarity index 93%
rename from vendor/github.com/Sirupsen/logrus/entry.go
rename to vendor/github.com/sirupsen/logrus/entry.go
index 1fad45e0..778f4c9f 100644
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -94,38 +94,47 @@ func (entry Entry) log(level Level, msg string) {
entry.Level = level
entry.Message = msg
- entry.Logger.mu.Lock()
- err := entry.Logger.Hooks.Fire(level, &entry)
- entry.Logger.mu.Unlock()
- if err != nil {
- entry.Logger.mu.Lock()
- fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
- entry.Logger.mu.Unlock()
- }
+ entry.fireHooks()
+
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
- serialized, err := entry.Logger.Formatter.Format(&entry)
+
+ entry.write()
+
entry.Buffer = nil
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) fireHooks() {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ err := entry.Logger.Hooks.Fire(entry.Level, &entry)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ }
+}
+
+func (entry *Entry) write() {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
if err != nil {
- entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- entry.Logger.mu.Unlock()
} else {
- entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
- entry.Logger.mu.Unlock()
- }
-
- // To avoid Entry#log() returning a value that only would make sense for
- // panic() to use in Entry#Panic(), we avoid the allocation by checking
- // directly here.
- if level <= PanicLevel {
- panic(&entry)
}
}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/exported.go
rename to vendor/github.com/sirupsen/logrus/exported.go
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/formatter.go
rename to vendor/github.com/sirupsen/logrus/formatter.go
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/hooks.go
rename to vendor/github.com/sirupsen/logrus/hooks.go
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/json_formatter.go
rename to vendor/github.com/sirupsen/logrus/json_formatter.go
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/logger.go
rename to vendor/github.com/sirupsen/logrus/logger.go
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/logrus.go
rename to vendor/github.com/sirupsen/logrus/logrus.go
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
similarity index 84%
rename from vendor/github.com/Sirupsen/logrus/terminal_bsd.go
rename to vendor/github.com/sirupsen/logrus/terminal_bsd.go
index d7b3893f..4880d13d 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
@@ -1,5 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
+// +build !appengine,!gopherjs
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
similarity index 75%
rename from vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
rename to vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
index 2403de98..3de08e80 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -1,4 +1,4 @@
-// +build appengine
+// +build appengine gopherjs
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
similarity index 88%
rename from vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
rename to vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
index 116bcb4e..067047a1 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -1,4 +1,4 @@
-// +build !appengine
+// +build !appengine,!gopherjs
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
similarity index 90%
rename from vendor/github.com/Sirupsen/logrus/terminal_linux.go
rename to vendor/github.com/sirupsen/logrus/terminal_linux.go
index 88d7298e..f29a0097 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go
@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !appengine
+// +build !appengine,!gopherjs
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/text_formatter.go
rename to vendor/github.com/sirupsen/logrus/text_formatter.go
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
similarity index 100%
rename from vendor/github.com/Sirupsen/logrus/writer.go
rename to vendor/github.com/sirupsen/logrus/writer.go
diff --git a/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md b/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md
deleted file mode 100644
index 1820ecb3..00000000
--- a/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# Contributing
-
-In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted.
-
-Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines:
-
-- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request.
-- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license.
-- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set.
-- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out.
- - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc...
- - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc...
diff --git a/vendor/github.com/smartystreets/assertions/README.md b/vendor/github.com/smartystreets/assertions/README.md
deleted file mode 100644
index 58383bb0..00000000
--- a/vendor/github.com/smartystreets/assertions/README.md
+++ /dev/null
@@ -1,575 +0,0 @@
-# assertions
---
- import "github.com/smartystreets/assertions"
-
-Package assertions contains the implementations for all assertions which are
-referenced in goconvey's `convey` package
-(github.com/smartystreets/goconvey/convey) and gunit
-(github.com/smartystreets/gunit) for use with the So(...) method. They can also
-be used in traditional Go test functions and even in applications.
-
-Many of the assertions lean heavily on work done by Aaron Jacobs in his
-excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The
-ShouldResemble assertion leans heavily on work done by Daniel Jacques in his
-very helpful go-render library. (https://github.com/luci/go-render)
-
-## Usage
-
-#### func GoConveyMode
-
-```go
-func GoConveyMode(yes bool)
-```
-GoConveyMode provides control over JSON serialization of failures. When using
-the assertions in this package from the convey package JSON results are very
-helpful and can be rendered in a DIFF view. In that case, this function will be
-called with a true value to enable the JSON serialization. By default, the
-assertions in this package will not serializer a JSON result, making standalone
-ussage more convenient.
-
-#### func ShouldAlmostEqual
-
-```go
-func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldAlmostEqual makes sure that two parameters are close enough to being
-equal. The acceptable delta may be specified with a third argument, or a very
-small default delta will be used.
-
-#### func ShouldBeBetween
-
-```go
-func ShouldBeBetween(actual interface{}, expected ...interface{}) string
-```
-ShouldBeBetween receives exactly three parameters: an actual value, a lower
-bound, and an upper bound. It ensures that the actual value is between both
-bounds (but not equal to either of them).
-
-#### func ShouldBeBetweenOrEqual
-
-```go
-func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a
-lower bound, and an upper bound. It ensures that the actual value is between
-both bounds or equal to one of them.
-
-#### func ShouldBeBlank
-
-```go
-func ShouldBeBlank(actual interface{}, expected ...interface{}) string
-```
-ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal
-to "".
-
-#### func ShouldBeChronological
-
-```go
-func ShouldBeChronological(actual interface{}, expected ...interface{}) string
-```
-ShouldBeChronological receives a []time.Time slice and asserts that the are in
-chronological order starting with the first time.Time as the earliest.
-
-#### func ShouldBeEmpty
-
-```go
-func ShouldBeEmpty(actual interface{}, expected ...interface{}) string
-```
-ShouldBeEmpty receives a single parameter (actual) and determines whether or not
-calling len(actual) would return `0`. It obeys the rules specified by the len
-function for determining length: http://golang.org/pkg/builtin/#len
-
-#### func ShouldBeFalse
-
-```go
-func ShouldBeFalse(actual interface{}, expected ...interface{}) string
-```
-ShouldBeFalse receives a single parameter and ensures that it is false.
-
-#### func ShouldBeGreaterThan
-
-```go
-func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string
-```
-ShouldBeGreaterThan receives exactly two parameters and ensures that the first
-is greater than the second.
-
-#### func ShouldBeGreaterThanOrEqualTo
-
-```go
-func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string
-```
-ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that
-the first is greater than or equal to the second.
-
-#### func ShouldBeIn
-
-```go
-func ShouldBeIn(actual interface{}, expected ...interface{}) string
-```
-ShouldBeIn receives at least 2 parameters. The first is a proposed member of the
-collection that is passed in either as the second parameter, or of the
-collection that is comprised of all the remaining parameters. This assertion
-ensures that the proposed member is in the collection (using ShouldEqual).
-
-#### func ShouldBeLessThan
-
-```go
-func ShouldBeLessThan(actual interface{}, expected ...interface{}) string
-```
-ShouldBeLessThan receives exactly two parameters and ensures that the first is
-less than the second.
-
-#### func ShouldBeLessThanOrEqualTo
-
-```go
-func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string
-```
-ShouldBeLessThan receives exactly two parameters and ensures that the first is
-less than or equal to the second.
-
-#### func ShouldBeNil
-
-```go
-func ShouldBeNil(actual interface{}, expected ...interface{}) string
-```
-ShouldBeNil receives a single parameter and ensures that it is nil.
-
-#### func ShouldBeTrue
-
-```go
-func ShouldBeTrue(actual interface{}, expected ...interface{}) string
-```
-ShouldBeTrue receives a single parameter and ensures that it is true.
-
-#### func ShouldBeZeroValue
-
-```go
-func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string
-```
-ShouldBeZeroValue receives a single parameter and ensures that it is the Go
-equivalent of the default value, or "zero" value.
-
-#### func ShouldContain
-
-```go
-func ShouldContain(actual interface{}, expected ...interface{}) string
-```
-ShouldContain receives exactly two parameters. The first is a slice and the
-second is a proposed member. Membership is determined using ShouldEqual.
-
-#### func ShouldContainKey
-
-```go
-func ShouldContainKey(actual interface{}, expected ...interface{}) string
-```
-ShouldContainKey receives exactly two parameters. The first is a map and the
-second is a proposed key. Keys are compared with a simple '=='.
-
-#### func ShouldContainSubstring
-
-```go
-func ShouldContainSubstring(actual interface{}, expected ...interface{}) string
-```
-ShouldContainSubstring receives exactly 2 string parameters and ensures that the
-first contains the second as a substring.
-
-#### func ShouldEndWith
-
-```go
-func ShouldEndWith(actual interface{}, expected ...interface{}) string
-```
-ShouldEndWith receives exactly 2 string parameters and ensures that the first
-ends with the second.
-
-#### func ShouldEqual
-
-```go
-func ShouldEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldEqual receives exactly two parameters and does an equality check.
-
-#### func ShouldEqualTrimSpace
-
-```go
-func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string
-```
-ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the
-first is equal to the second after removing all leading and trailing whitespace
-using strings.TrimSpace(first).
-
-#### func ShouldEqualWithout
-
-```go
-func ShouldEqualWithout(actual interface{}, expected ...interface{}) string
-```
-ShouldEqualWithout receives exactly 3 string parameters and ensures that the
-first is equal to the second after removing all instances of the third from the
-first using strings.Replace(first, third, "", -1).
-
-#### func ShouldHappenAfter
-
-```go
-func ShouldHappenAfter(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the
-first happens after the second.
-
-#### func ShouldHappenBefore
-
-```go
-func ShouldHappenBefore(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the
-first happens before the second.
-
-#### func ShouldHappenBetween
-
-```go
-func ShouldHappenBetween(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the
-first happens between (not on) the second and third.
-
-#### func ShouldHappenOnOrAfter
-
-```go
-func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that
-the first happens on or after the second.
-
-#### func ShouldHappenOnOrBefore
-
-```go
-func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that
-the first happens on or before the second.
-
-#### func ShouldHappenOnOrBetween
-
-```go
-func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that
-the first happens between or on the second and third.
-
-#### func ShouldHappenWithin
-
-```go
-func ShouldHappenWithin(actual interface{}, expected ...interface{}) string
-```
-ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
-arguments) and asserts that the first time.Time happens within or on the
-duration specified relative to the other time.Time.
-
-#### func ShouldHaveLength
-
-```go
-func ShouldHaveLength(actual interface{}, expected ...interface{}) string
-```
-ShouldHaveLength receives 2 parameters. The first is a collection to check the
-length of, the second being the expected length. It obeys the rules specified by
-the len function for determining length: http://golang.org/pkg/builtin/#len
-
-#### func ShouldHaveSameTypeAs
-
-```go
-func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string
-```
-ShouldHaveSameTypeAs receives exactly two parameters and compares their
-underlying types for equality.
-
-#### func ShouldImplement
-
-```go
-func ShouldImplement(actual interface{}, expectedList ...interface{}) string
-```
-ShouldImplement receives exactly two parameters and ensures that the first
-implements the interface type of the second.
-
-#### func ShouldNotAlmostEqual
-
-```go
-func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
-
-#### func ShouldNotBeBetween
-
-```go
-func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeBetween receives exactly three parameters: an actual value, a lower
-bound, and an upper bound. It ensures that the actual value is NOT between both
-bounds.
-
-#### func ShouldNotBeBetweenOrEqual
-
-```go
-func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a
-lower bound, and an upper bound. It ensures that the actual value is nopt
-between the bounds nor equal to either of them.
-
-#### func ShouldNotBeBlank
-
-```go
-func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is
-equal to "".
-
-#### func ShouldNotBeEmpty
-
-```go
-func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeEmpty receives a single parameter (actual) and determines whether or
-not calling len(actual) would return a value greater than zero. It obeys the
-rules specified by the `len` function for determining length:
-http://golang.org/pkg/builtin/#len
-
-#### func ShouldNotBeIn
-
-```go
-func ShouldNotBeIn(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of
-the collection that is passed in either as the second parameter, or of the
-collection that is comprised of all the remaining parameters. This assertion
-ensures that the proposed member is NOT in the collection (using ShouldEqual).
-
-#### func ShouldNotBeNil
-
-```go
-func ShouldNotBeNil(actual interface{}, expected ...interface{}) string
-```
-ShouldNotBeNil receives a single parameter and ensures that it is not nil.
-
-#### func ShouldNotContain
-
-```go
-func ShouldNotContain(actual interface{}, expected ...interface{}) string
-```
-ShouldNotContain receives exactly two parameters. The first is a slice and the
-second is a proposed member. Membership is determinied using ShouldEqual.
-
-#### func ShouldNotContainKey
-
-```go
-func ShouldNotContainKey(actual interface{}, expected ...interface{}) string
-```
-ShouldNotContainKey receives exactly two parameters. The first is a map and the
-second is a proposed absent key. Keys are compared with a simple '=='.
-
-#### func ShouldNotContainSubstring
-
-```go
-func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string
-```
-ShouldNotContainSubstring receives exactly 2 string parameters and ensures that
-the first does NOT contain the second as a substring.
-
-#### func ShouldNotEndWith
-
-```go
-func ShouldNotEndWith(actual interface{}, expected ...interface{}) string
-```
-ShouldEndWith receives exactly 2 string parameters and ensures that the first
-does not end with the second.
-
-#### func ShouldNotEqual
-
-```go
-func ShouldNotEqual(actual interface{}, expected ...interface{}) string
-```
-ShouldNotEqual receives exactly two parameters and does an inequality check.
-
-#### func ShouldNotHappenOnOrBetween
-
-```go
-func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string
-```
-ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts
-that the first does NOT happen between or on the second or third.
-
-#### func ShouldNotHappenWithin
-
-```go
-func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string
-```
-ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
-arguments) and asserts that the first time.Time does NOT happen within or on the
-duration specified relative to the other time.Time.
-
-#### func ShouldNotHaveSameTypeAs
-
-```go
-func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string
-```
-ShouldNotHaveSameTypeAs receives exactly two parameters and compares their
-underlying types for inequality.
-
-#### func ShouldNotImplement
-
-```go
-func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string
-```
-ShouldNotImplement receives exactly two parameters and ensures that the first
-does NOT implement the interface type of the second.
-
-#### func ShouldNotPanic
-
-```go
-func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string)
-```
-ShouldNotPanic receives a void, niladic function and expects to execute the
-function without any panic.
-
-#### func ShouldNotPanicWith
-
-```go
-func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string)
-```
-ShouldNotPanicWith receives a void, niladic function and expects to recover a
-panic whose content differs from the second argument.
-
-#### func ShouldNotPointTo
-
-```go
-func ShouldNotPointTo(actual interface{}, expected ...interface{}) string
-```
-ShouldNotPointTo receives exactly two parameters and checks to see that they
-point to different addresses.
-
-#### func ShouldNotResemble
-
-```go
-func ShouldNotResemble(actual interface{}, expected ...interface{}) string
-```
-ShouldNotResemble receives exactly two parameters and does an inverse deep equal
-check (see reflect.DeepEqual)
-
-#### func ShouldNotStartWith
-
-```go
-func ShouldNotStartWith(actual interface{}, expected ...interface{}) string
-```
-ShouldNotStartWith receives exactly 2 string parameters and ensures that the
-first does not start with the second.
-
-#### func ShouldPanic
-
-```go
-func ShouldPanic(actual interface{}, expected ...interface{}) (message string)
-```
-ShouldPanic receives a void, niladic function and expects to recover a panic.
-
-#### func ShouldPanicWith
-
-```go
-func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string)
-```
-ShouldPanicWith receives a void, niladic function and expects to recover a panic
-with the second argument as the content.
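-
-For example (illustrative only, via the `So` helper documented below):
-
-```go
-So(func() { panic("boom") }, ShouldPanicWith, "boom")
-```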
-
-#### func ShouldPointTo
-
-```go
-func ShouldPointTo(actual interface{}, expected ...interface{}) string
-```
-ShouldPointTo receives exactly two parameters and checks to see that they point
-to the same address.
-
-#### func ShouldResemble
-
-```go
-func ShouldResemble(actual interface{}, expected ...interface{}) string
-```
-ShouldResemble receives exactly two parameters and does a deep equal check (see
-reflect.DeepEqual)
-
-#### func ShouldStartWith
-
-```go
-func ShouldStartWith(actual interface{}, expected ...interface{}) string
-```
-ShouldStartWith receives exactly 2 string parameters and ensures that the first
-starts with the second.
-
-#### func So
-
-```go
-func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string)
-```
-So is a convenience function (as opposed to an inconvenience function?) for
-running assertions on arbitrary arguments in any context, be it for testing or
-even application logging. It allows you to perform assertion-like behavior (and
-get nicely formatted messages detailing discrepancies) but without the program
-blowing up or panicking. All that is required is to import this package and call
-`So` with one of the assertions exported by this package as the second
-parameter. The first return parameter is a boolean indicating if the assertion
-was true. The second return parameter is the well-formatted message showing why
-an assertion was incorrect, or blank if the assertion was correct.
-
-Example:
-
- if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
- log.Println(message)
- }
-
-#### type Assertion
-
-```go
-type Assertion struct {
-}
-```
-
-
-#### func New
-
-```go
-func New(t testingT) *Assertion
-```
-New swallows the *testing.T struct and prints failed assertions using t.Error.
-Example: assertions.New(t).So(1, should.Equal, 1)
-
-#### func (*Assertion) Failed
-
-```go
-func (this *Assertion) Failed() bool
-```
-Failed reports whether any calls to So (on this Assertion instance) have failed.
-
-#### func (*Assertion) So
-
-```go
-func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool
-```
-So calls the standalone So function and additionally, calls t.Error in failure
-scenarios.
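-
-A minimal sketch tying `New`, `So`, and `Failed` together (the test name and
-example values are illustrative, not part of this package):
-
-```go
-package example
-
-import (
-	"testing"
-
-	"github.com/smartystreets/assertions"
-)
-
-func TestExample(t *testing.T) {
-	a := assertions.New(t)
-	a.So(1, assertions.ShouldEqual, 1)        // passes
-	a.So("hi", assertions.ShouldEqual, "bye") // fails, so t.Error is called
-	if a.Failed() {
-		t.Log("at least one So call on this Assertion instance failed")
-	}
-}
-```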
-
-#### type FailureView
-
-```go
-type FailureView struct {
- Message string `json:"Message"`
- Expected string `json:"Expected"`
- Actual string `json:"Actual"`
-}
-```
-
-This struct is also declared in
-github.com/smartystreets/goconvey/convey/reporting. The json struct tags should
-be equal in both declarations.
-
-#### type Serializer
-
-```go
-type Serializer interface {
- // contains filtered or unexported methods
-}
-```
diff --git a/vendor/github.com/smartystreets/assertions/doc.go b/vendor/github.com/smartystreets/assertions/doc.go
index 50572325..ba30a926 100644
--- a/vendor/github.com/smartystreets/assertions/doc.go
+++ b/vendor/github.com/smartystreets/assertions/doc.go
@@ -5,6 +5,8 @@
// They can also be used in traditional Go test functions and even in
// applications.
//
+// https://smartystreets.com
+//
// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library.
// (https://github.com/jacobsa/oglematchers)
// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library.
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go
index 23b7a586..313611ef 100644
--- a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go
+++ b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go
@@ -143,20 +143,24 @@ func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, imp
if !implicit {
writeType(buf, ptrs, vt)
}
- structAnon := vt.Name() == ""
buf.WriteRune('{')
- for i := 0; i < vt.NumField(); i++ {
- if i > 0 {
- buf.WriteString(", ")
- }
- anon := structAnon && isAnon(vt.Field(i).Type)
+ if rendered, ok := renderTime(v); ok {
+ buf.WriteString(rendered)
+ } else {
+ structAnon := vt.Name() == ""
+ for i := 0; i < vt.NumField(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ anon := structAnon && isAnon(vt.Field(i).Type)
- if !anon {
- buf.WriteString(vt.Field(i).Name)
- buf.WriteRune(':')
- }
+ if !anon {
+ buf.WriteString(vt.Field(i).Name)
+ buf.WriteRune(':')
+ }
- s.render(buf, 0, v.Field(i), anon)
+ s.render(buf, 0, v.Field(i), anon)
+ }
}
buf.WriteRune('}')
diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go
new file mode 100644
index 00000000..990c75d0
--- /dev/null
+++ b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go
@@ -0,0 +1,26 @@
+package render
+
+import (
+ "reflect"
+ "time"
+)
+
+func renderTime(value reflect.Value) (string, bool) {
+ if instant, ok := convertTime(value); !ok {
+ return "", false
+ } else if instant.IsZero() {
+ return "0", true
+ } else {
+ return instant.String(), true
+ }
+}
+
+func convertTime(value reflect.Value) (t time.Time, ok bool) {
+ if value.Type() == timeType {
+ defer func() { recover() }()
+ t, ok = value.Interface().(time.Time)
+ }
+ return
+}
+
+var timeType = reflect.TypeOf(time.Time{})
diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md b/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md
deleted file mode 100644
index 215a2bb7..00000000
--- a/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers)
-
-`oglematchers` is a package for the Go programming language containing a set of
-matchers, useful in a testing or mocking framework, inspired by and mostly
-compatible with [Google Test][googletest] for C++ and
-[Google JS Test][google-js-test]. The package is used by the
-[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking
-framework, which may be more directly useful to you, but can be generically used
-elsewhere as well.
-
-A "matcher" is simply an object with a `Matches` method defining a set of golang
-values matched by the matcher, and a `Description` method describing that set.
-For example, here are some matchers:
-
-```go
-// Numbers
-Equals(17.13)
-LessThan(19)
-
-// Strings
-Equals("taco")
-HasSubstr("burrito")
-MatchesRegex("t.*o")
-
-// Combining matchers
-AnyOf(LessThan(17), GreaterThan(19))
-```
-
-There are lots more; see [here][reference] for a reference. You can also add
-your own simply by implementing the `oglematchers.Matcher` interface.
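-
-As a sketch of what that involves (assuming, per the description above, a
-`Matches(candidate interface{}) error` method where a nil error means a match,
-and a `Description() string` method):
-
-```go
-package mymatchers
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-)
-
-// hasPrefix matches any string that starts with the given prefix.
-type hasPrefix struct{ prefix string }
-
-func (m hasPrefix) Matches(candidate interface{}) error {
-	s, ok := candidate.(string)
-	if !ok {
-		return errors.New("which is not a string")
-	}
-	if !strings.HasPrefix(s, m.prefix) {
-		return fmt.Errorf("which does not start with %q", m.prefix)
-	}
-	return nil // a nil error means the candidate matches
-}
-
-func (m hasPrefix) Description() string {
-	return fmt.Sprintf("starts with %q", m.prefix)
-}
-```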
-
-
-Installation
-------------
-
-First, make sure you have installed Go 1.0.2 or newer. See
-[here][golang-install] for instructions.
-
-Use the following command to install `oglematchers` and keep it up to date:
-
- go get -u github.com/smartystreets/assertions/internal/oglematchers
-
-
-Documentation
--------------
-
-See [here][reference] for documentation. Alternatively, you can install the
-package and then use `godoc`:
-
- godoc github.com/smartystreets/assertions/internal/oglematchers
-
-
-[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers
-[golang-install]: http://golang.org/doc/install.html
-[googletest]: http://code.google.com/p/googletest/
-[google-js-test]: http://code.google.com/p/google-js-test/
-[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest
-[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock
diff --git a/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey b/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey
deleted file mode 100644
index a2d9327d..00000000
--- a/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey
+++ /dev/null
@@ -1,4 +0,0 @@
-#ignore
--timeout=1s
-#-covermode=count
-#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting
\ No newline at end of file
diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
deleted file mode 100644
index 79982854..00000000
--- a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
+++ /dev/null
@@ -1,2 +0,0 @@
-#ignore
--timeout=1s
diff --git a/vendor/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt b/vendor/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt
new file mode 100755
index 00000000..75b52484
--- /dev/null
+++ b/vendor/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
new file mode 100644
index 00000000..2b00ddba
--- /dev/null
+++ b/vendor/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 00000000..1fbd3e97
--- /dev/null
+++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
index 02dad484..731c89a2 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -108,9 +108,7 @@ func ReadPassword(fd int) ([]byte, error) {
return nil, err
}
- defer func() {
- unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
- }()
+ defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
return readPasswordLine(passwordReader(fd))
}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
index a2e1b57d..9e41b9f4 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -14,7 +14,7 @@ import (
// State contains the state of a terminal.
type State struct {
- state *unix.Termios
+ termios unix.Termios
}
// IsTerminal returns true if the given file descriptor is a terminal.
@@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) {
// restored.
// see http://cr.illumos.org/~webrev/andy_js/1060/
func MakeRaw(fd int) (*State, error) {
- oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
- oldTermios := *oldTermiosPtr
-
- newTermios := oldTermios
- newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
- newTermios.Oflag &^= syscall.OPOST
- newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
- newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
- newTermios.Cflag |= syscall.CS8
- newTermios.Cc[unix.VMIN] = 1
- newTermios.Cc[unix.VTIME] = 0
-
- if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
+
+ oldState := State{termios: *termios}
+
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+ termios.Cflag &^= unix.CSIZE | unix.PARENB
+ termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
+
+ if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
return nil, err
}
- return &State{
- state: oldTermiosPtr,
- }, nil
+ return &oldState, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, oldState *State) error {
- return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state)
+ return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
}
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
- oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
if err != nil {
return nil, err
}
- return &State{
- state: oldTermiosPtr,
- }, nil
+ return &State{termios: *termios}, nil
}
// GetSize returns the dimensions of the given terminal.
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 92944f3b..8618955d 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -89,9 +89,15 @@ func ReadPassword(fd int) ([]byte, error) {
return nil, err
}
- defer func() {
- windows.SetConsoleMode(windows.Handle(fd), old)
- }()
+ defer windows.SetConsoleMode(windows.Handle(fd), old)
- return readPasswordLine(os.NewFile(uintptr(fd), "stdin"))
+ var h windows.Handle
+ p, _ := windows.GetCurrentProcess()
+ if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
+ return nil, err
+ }
+
+ f := os.NewFile(uintptr(h), "stdin")
+ defer f.Close()
+ return readPasswordLine(f)
}
diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/golang.org/x/sys/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/golang.org/x/sys/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
deleted file mode 100644
index bc6f6031..00000000
--- a/vendor/golang.org/x/sys/unix/README.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Building `sys/unix`
-
-The sys/unix package provides access to the raw system call interface of the
-underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
-
-Porting Go to a new architecture/OS combination or adding syscalls, types, or
-constants to an existing architecture/OS pair requires some manual effort;
-however, there are tools that automate much of the process.
-
-## Build Systems
-
-There are currently two ways we generate the necessary files. We are
-migrating the build system to use containers so the builds are reproducible.
-This is being done on an OS-by-OS basis. Please update this documentation as
-components of the build system change.
-
-### Old Build System (currently for `GOOS != "linux" || GOARCH == "sparc64"`)
-
-The old build system generates the Go files based on the C header files
-present on your system. This means that files
-for a given GOOS/GOARCH pair must be generated on a system with that OS and
-architecture. This also means that the generated code can differ from system
-to system, based on differences in the header files.
-
-To avoid this, if you are using the old build system, only generate the Go
-files on an installation with unmodified header files. It is also important to
-keep track of which version of the OS the files were generated from (ex.
-Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
-and have each OS upgrade correspond to a single change.
-
-To build the files for your current OS and architecture, make sure GOOS and
-GOARCH are set correctly and run `mkall.sh`. This will generate the files for
-your specific system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, perl, go
-
-### New Build System (currently for `GOOS == "linux" && GOARCH != "sparc64"`)
-
-The new build system uses a Docker container to generate the go files directly
-from source checkouts of the kernel and various system libraries. This means
-that on any platform that supports Docker, all the files using the new build
-system can be generated at once, and generated files will not change based on
-what the person running the scripts has installed on their computer.
-
-The OS specific files for the new build system are located in the `${GOOS}`
-directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
-the kernel or system library updates, modify the Dockerfile at
-`${GOOS}/Dockerfile` to checkout the new release of the source.
-
-To build all the files under the new build system, you must be on an amd64/Linux
-system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
-then generate all of the files for all of the GOOS/GOARCH pairs in the new build
-system. Running `mkall.sh -n` shows the commands that will be run.
-
-Requirements: bash, perl, go, docker
-
-## Component files
-
-This section describes the various files used in the code generation process.
-It also contains instructions on how to modify these files to add a new
-architecture/OS or to add additional syscalls, types, or constants. Note that
-if you are using the new build system, the scripts cannot be called normally.
-They must be called from within the docker container.
-
-### asm files
-
-The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
-call dispatch. There are three entry points:
-```
- func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
- func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
- func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-```
-The first and second are the standard ones; they differ only in how many
-arguments can be passed to the kernel. The third is for low-level use by the
-ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
-let it know that a system call is running.
-
-When porting Go to a new architecture/OS, this file must be implemented for
-each GOOS/GOARCH pair.
-
-### mksysnum
-
-Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
-for the old system). This script takes in a list of header files containing the
-syscall number declarations and parses them to produce the corresponding list of
-Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
-constants.
-
-Adding new syscall numbers is mostly done by running the build on a sufficiently
-new installation of the target OS (or updating the source checkouts for the
-new build system). However, depending on the OS, you may need to update the
-parsing in mksysnum.
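-
-For illustration, the generated file is simply a list of Go constants along
-these lines (the names and values shown here are only an example):
-
-```
-const (
-	SYS_READ  = 0
-	SYS_WRITE = 1
-)
-```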
-
-### mksyscall.pl
-
-The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
-hand-written Go files which implement system calls (for unix, the specific OS,
-or the specific OS/Architecture pair respectively) that need special handling
-and list `//sys` comments giving prototypes for ones that can be generated.
-
-The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
-them into syscalls. This requires the name of the prototype in the comment to
-match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
-prototype can be exported (capitalized) or not.
-
-Adding a new syscall often just requires adding a new `//sys` function prototype
-with the desired arguments and a capitalized name so it is exported. However, if
-you want the interface to the syscall to be different, often one will make an
-unexported `//sys` prototype, and then write a custom wrapper in
-`syscall_${GOOS}.go`.
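-
-For illustration, such prototype comments look roughly like this (the specific
-lines below are examples, not an exhaustive or authoritative list):
-
-```
-//sys	Unlink(path string) (err error)
-//sysnb	Getpid() (pid int)
-```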
-
-### types files
-
-For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
-`types_${GOOS}.go` on the old system). This file includes standard C headers and
-creates Go type aliases to the corresponding C types. The file is then fed
-through godefs to get the Go compatible definitions. Finally, the generated code
-is fed through mkpost.go to format the code correctly and remove any hidden or
-private identifiers. This cleaned-up code is written to
-`ztypes_${GOOS}_${GOARCH}.go`.
-
-The hardest part about preparing this file is figuring out which headers to
-include and which symbols need to be `#define`d to get the actual data
-structures that pass through to the kernel system calls. Some C libraries
-preset alternate versions for binary compatibility and translate them on the
-way in and out of system calls, but there is almost always a `#define` that can
-get the real ones.
-See `types_darwin.go` and `linux/types.go` for examples.
-
-To add a new type, add in the necessary include statement at the top of the
-file (if it is not already there) and add in a type alias line. Note that if
-your type is significantly different on different architectures, you may need
-some `#if/#elif` macros in your include statements.
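-
-For illustration, a type alias line in the hand-written types file looks
-roughly like this (the exact C struct and Go type names vary by OS):
-
-```
-type Stat_t C.struct_stat
-```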
-
-### mkerrors.sh
-
-This script is used to generate the system's various constants. This doesn't
-just include the error numbers and error strings, but also the signal numbers
-and a wide variety of miscellaneous constants. The constants come from the list
-of include files in the `includes_${uname}` variable. A regex then picks out
-the desired `#define` statements, and generates the corresponding Go constants.
-The error numbers and strings are generated from `#include <errno.h>`, and the
-signal numbers and strings are generated from `#include <signal.h>`. All of
-these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
-`_errors.c`, which prints out all the constants.
-
-To add a constant, add the header that includes it to the appropriate variable.
-Then, edit the regex (if necessary) to match the desired constant. Avoid making
-the regex too broad to avoid matching unintended constants.
-
-
-## Generated files
-
-### `zerrors_${GOOS}_${GOARCH}.go`
-
-A file containing all of the system's generated error numbers, error strings,
-signal numbers, and constants. Generated by `mkerrors.sh` (see above).
-
-### `zsyscall_${GOOS}_${GOARCH}.go`
-
-A file containing all the generated syscalls for a specific GOOS and GOARCH.
-Generated by `mksyscall.pl` (see above).
-
-### `zsysnum_${GOOS}_${GOARCH}.go`
-
-A list of numeric constants for all the syscall numbers of the specific GOOS
-and GOARCH. Generated by mksysnum (see above).
-
-### `ztypes_${GOOS}_${GOARCH}.go`
-
-A file containing Go types for passing into (or returning from) syscalls.
-Generated by godefs and the types file (see above).
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index d81fbb5b..72afe333 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -16,7 +16,7 @@ const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
type CPUSet [cpuSetSize]cpuMask
func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
- _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(set)), uintptr(unsafe.Pointer(set)))
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
if e != 0 {
return errnoErr(e)
}
diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
index d5ed6726..603dd572 100644
--- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
+++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -13,17 +13,17 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-64
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-88
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
-TEXT ·Syscall9(SB),NOSPLIT,$0-112
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-64
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
index 83b6bcea..df520487 100644
--- a/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -7,7 +7,7 @@
package unix
import (
- errorspkg "errors"
+ "errors"
"fmt"
)
@@ -60,26 +60,26 @@ func CapRightsSet(rights *CapRights, setrights []uint64) error {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return errorspkg.New("bad rights size")
+ return errors.New("bad rights size")
}
for _, right := range setrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errorspkg.New("bad right version")
+ return errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return err
}
if i >= n {
- return errorspkg.New("index overflow")
+ return errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch")
+ return errors.New("index mismatch")
}
rights.Rights[i] |= right
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch (after assign)")
+ return errors.New("index mismatch (after assign)")
}
}
@@ -95,26 +95,26 @@ func CapRightsClear(rights *CapRights, clearrights []uint64) error {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return errorspkg.New("bad rights size")
+ return errors.New("bad rights size")
}
for _, right := range clearrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errorspkg.New("bad right version")
+ return errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return err
}
if i >= n {
- return errorspkg.New("index overflow")
+ return errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch")
+ return errors.New("index mismatch")
}
rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch (after assign)")
+ return errors.New("index mismatch (after assign)")
}
}
@@ -130,22 +130,22 @@ func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return false, errorspkg.New("bad rights size")
+ return false, errors.New("bad rights size")
}
for _, right := range setrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return false, errorspkg.New("bad right version")
+ return false, errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return false, err
}
if i >= n {
- return false, errorspkg.New("index overflow")
+ return false, errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return false, errorspkg.New("index mismatch")
+ return false, errors.New("index mismatch")
}
if (rights.Rights[i] & right) != right {
return false, nil
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
index bd475812..95fd3531 100644
--- a/vendor/golang.org/x/sys/unix/dirent.go
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -6,97 +6,12 @@
package unix
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
+import "syscall"
// ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added
// to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
+ return syscall.ParseDirent(buf, max, names)
}
diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/fcntl.go
similarity index 74%
rename from vendor/golang.org/x/sys/unix/flock.go
rename to vendor/golang.org/x/sys/unix/fcntl.go
index 2994ce75..0c58c7e1 100644
--- a/vendor/golang.org/x/sys/unix/flock.go
+++ b/vendor/golang.org/x/sys/unix/fcntl.go
@@ -12,6 +12,12 @@ import "unsafe"
// systems by flock_linux_32bit.go to be SYS_FCNTL64.
var fcntl64Syscall uintptr = SYS_FCNTL
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
+ return int(valptr), err
+}
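+
+// For example (illustrative only), the file status flags of an open file
+// descriptor fd could be read with:
+//
+//	flags, err := FcntlInt(fd, F_GETFL, 0)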
+
// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
similarity index 100%
rename from vendor/golang.org/x/sys/unix/flock_linux_32bit.go
rename to vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
index 24e96b11..46523ced 100644
--- a/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -36,12 +36,3 @@ gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3
{
return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
-
-// Define the use function in C so that it is not inlined.
-
-extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));
-
-void
-use(void *p __attribute__ ((unused)))
-{
-}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
deleted file mode 100755
index 1715122b..00000000
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# This script runs or (given -n) prints suggested commands to generate files for
-# the Architecture/OS specified by the GOARCH and GOOS environment variables.
-# See README.md for more information about how the build system works.
-
-GOOSARCH="${GOOS}_${GOARCH}"
-
-# defaults
-mksyscall="./mksyscall.pl"
-mkerrors="./mkerrors.sh"
-zerrors="zerrors_$GOOSARCH.go"
-mksysctl=""
-zsysctl="zsysctl_$GOOSARCH.go"
-mksysnum=
-mktypes=
-run="sh"
-cmd=""
-
-case "$1" in
--syscalls)
- for i in zsyscall*go
- do
- # Run the command line that appears in the first line
- # of the generated file to regenerate it.
- sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
- rm _$i
- done
- exit 0
- ;;
--n)
- run="cat"
- cmd="echo"
- shift
-esac
-
-case "$#" in
-0)
- ;;
-*)
- echo 'usage: mkall.sh [-n]' 1>&2
- exit 2
-esac
-
-if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
- # Use the new build system
- # Files generated through docker (use $cmd so you can Ctl-C the build or run)
- $cmd docker build --tag generate:$GOOS $GOOS
- $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
- exit
-fi
-
-GOOSARCH_in=syscall_$GOOSARCH.go
-case "$GOOSARCH" in
-_* | *_ | _)
- echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
- exit 1
- ;;
-darwin_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-darwin_amd64)
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-darwin_arm)
- mkerrors="$mkerrors"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-darwin_arm64)
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-dragonfly_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -dragonfly"
- mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_amd64)
- mkerrors="$mkerrors -m64"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-freebsd_arm)
- mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -arm"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-linux_sparc64)
- GOOSARCH_in=syscall_linux_sparc64.go
- unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
- mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_linux.pl $unistd_h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32 -netbsd"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -netbsd"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-netbsd_arm)
- mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -netbsd -arm"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-openbsd_386)
- mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32 -openbsd"
- mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_amd64)
- mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -openbsd"
- mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-openbsd_arm)
- mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -openbsd -arm"
- mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
- # Let the type of C char be signed for making the bare syscall
- # API consistent across platforms.
- mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
- ;;
-solaris_amd64)
- mksyscall="./mksyscall_solaris.pl"
- mkerrors="$mkerrors -m64"
- mksysnum=
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- ;;
-*)
- echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
- exit 1
- ;;
-esac
-
-(
- if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
- case "$GOOS" in
- *)
- syscall_goos="syscall_$GOOS.go"
- case "$GOOS" in
- darwin | dragonfly | freebsd | netbsd | openbsd)
- syscall_goos="syscall_bsd.go $syscall_goos"
- ;;
- esac
- if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
- ;;
- esac
- if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
- if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
- if [ -n "$mktypes" ]; then
- echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go";
- fi
-) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
deleted file mode 100755
index a452554b..00000000
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ /dev/null
@@ -1,578 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2009 The Go Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# Generate Go code listing errors and other #defined constant
-# values (ENAMETOOLONG etc.), by asking the preprocessor
-# about the definitions.
-
-unset LANG
-export LC_ALL=C
-export LC_CTYPE=C
-
-if test -z "$GOARCH" -o -z "$GOOS"; then
- echo 1>&2 "GOARCH or GOOS not defined in environment"
- exit 1
-fi
-
-# Check that we are using the new build system if we should
-if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
- if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
- echo 1>&2 "In the new build system, mkerrors should not be called directly."
- echo 1>&2 "See README.md"
- exit 1
- fi
-fi
-
-CC=${CC:-cc}
-
-if [[ "$GOOS" = "solaris" ]]; then
- # Assumes GNU versions of utilities in PATH.
- export PATH=/usr/gnu/bin:$PATH
-fi
-
-uname=$(uname)
-
-includes_Darwin='
-#define _DARWIN_C_SOURCE
-#define KERNEL
-#define _DARWIN_USE_64_BIT_INODE
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-'
-
-includes_DragonFly='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-'
-
-includes_FreeBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#if __FreeBSD__ >= 10
-#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
-#undef SIOCAIFADDR
-#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
-#undef SIOCSIFPHYADDR
-#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
-#endif
-'
-
-includes_Linux='
-#define _LARGEFILE_SOURCE
-#define _LARGEFILE64_SOURCE
-#ifndef __LP64__
-#define _FILE_OFFSET_BITS 64
-#endif
-#define _GNU_SOURCE
-
-// is broken on powerpc64, as it fails to include definitions of
-// these structures. We just include them copied from .
-#if defined(__powerpc__)
-struct sgttyb {
- char sg_ispeed;
- char sg_ospeed;
- char sg_erase;
- char sg_kill;
- short sg_flags;
-};
-
-struct tchars {
- char t_intrc;
- char t_quitc;
- char t_startc;
- char t_stopc;
- char t_eofc;
- char t_brkc;
-};
-
-struct ltchars {
- char t_suspc;
- char t_dsuspc;
- char t_rprntc;
- char t_flushc;
- char t_werasc;
- char t_lnextc;
-};
-#endif
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN 0x20000000
-#endif
-
-#ifndef PTRACE_GETREGS
-#define PTRACE_GETREGS 0xc
-#endif
-
-#ifndef PTRACE_SETREGS
-#define PTRACE_SETREGS 0xd
-#endif
-
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
-#ifdef SOL_BLUETOOTH
-// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
-// but it is already in bluetooth_linux.go
-#undef SOL_BLUETOOTH
-#endif
-
-// Certain constants are missing from the fs/crypto UAPI
-#define FS_KEY_DESC_PREFIX "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE 8
-#define FS_MAX_KEY_SIZE 64
-'
-
-includes_NetBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// Needed since refers to it...
-#define schedppq 1
-'
-
-includes_OpenBSD='
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-// We keep some constants not supported in OpenBSD 5.5 and beyond for
-// the promise of compatibility.
-#define EMUL_ENABLED 0x1
-#define EMUL_NATIVE 0x2
-#define IPV6_FAITH 0x1d
-#define IPV6_OPTIONS 0x1
-#define IPV6_RTHDR_STRICT 0x1
-#define IPV6_SOCKOPT_RESERVED1 0x3
-#define SIOCGIFGENERIC 0xc020693a
-#define SIOCSIFGENERIC 0x80206939
-#define WALTSIG 0x4
-'
-
-includes_SunOS='
-#include
-#include