diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/csrf.iml b/.idea/csrf.iml
new file mode 100644
index 0000000..5e764c4
--- /dev/null
+++ b/.idea/csrf.iml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="WEB_MODULE" version="4">
+  <component name="Go" enabled="true" />
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..3dadd68
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/csrf.iml" filepath="$PROJECT_DIR$/.idea/csrf.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/csrf.go b/csrf.go
index 82d76e7..ae8b4fe 100644
--- a/csrf.go
+++ b/csrf.go
@@ -19,12 +19,13 @@ package csrf
import (
"crypto/rand"
"fmt"
+ "go.wandrs.dev/binding"
+ "go.wandrs.dev/inject"
+ "go.wandrs.dev/session"
r "math/rand"
"net/http"
+ "reflect"
"time"
-
- "github.com/go-macaron/session"
- "gopkg.in/macaron.v1"
)
// CSRF represents a CSRF service and is used to get the current token and validate a suspect token.
@@ -199,9 +200,11 @@ func prepareOptions(options []Options) Options {
// Generate maps CSRF to each request. If this request is a Get request, it will generate a new token.
// Additionally, depending on options set, generated tokens will be sent via Header and/or Cookie.
-func Generate(options ...Options) macaron.Handler {
+func Generate(options ...Options) func(http.Handler) http.Handler {
opt := prepareOptions(options)
- return func(ctx *macaron.Context, sess session.Store) {
+
+ return binding.Inject(func(injector inject.Injector) error {
+ ctx := binding.ResponseWriter(injector)
x := &csrf{
Secret: opt.Secret,
Header: opt.Header,
@@ -212,12 +215,12 @@ func Generate(options ...Options) macaron.Handler {
CookieHttpOnly: opt.CookieHttpOnly,
ErrorFunc: opt.ErrorFunc,
}
- ctx.MapTo(x, (*CSRF)(nil))
- if opt.Origin && len(ctx.Req.Header.Get("Origin")) > 0 {
- return
+	if opt.Origin && len(ctx.R().Request().Header.Get("Origin")) > 0 {
+ return nil
}
+ sess := session.GetSession(injector)
x.ID = "0"
uid := sess.Get(opt.SessionKey)
if uid != nil {
@@ -231,8 +234,7 @@ func Generate(options ...Options) macaron.Handler {
_ = sess.Set(opt.oldSeesionKey, x.ID)
} else {
// If cookie present, map existing token, else generate a new one.
- if val := ctx.GetCookie(opt.Cookie); len(val) > 0 {
- // FIXME: test coverage.
+ if val := session.GetCookie(ctx.R().Request(), opt.Cookie); len(val) > 0 {
x.Token = val
} else {
needsNew = true
@@ -240,44 +242,67 @@ func Generate(options ...Options) macaron.Handler {
}
if needsNew {
- // FIXME: actionId.
x.Token = GenerateToken(x.Secret, x.ID, "POST")
if opt.SetCookie {
- ctx.SetCookie(opt.Cookie, x.Token, 0, opt.CookiePath, opt.CookieDomain, opt.Secure, opt.CookieHttpOnly, time.Now().AddDate(0, 0, 1))
+ newCookie := session.NewCookie(opt.Cookie, x.Token, opt.CookiePath, opt.CookieDomain, opt.Secure, opt.CookieHttpOnly, time.Now().AddDate(0, 0, 1))
+ ctx.Header().Add("Set-Cookie", newCookie.String())
}
}
if opt.SetHeader {
- ctx.Resp.Header().Add(opt.Header, x.Token)
+ ctx.Header().Add(opt.Header, x.Token)
}
- }
+ injector.Map(x)
+
+ return nil
+ })
}
// Csrfer maps CSRF to each request. If this request is a Get request, it will generate a new token.
// Additionally, depending on options set, generated tokens will be sent via Header and/or Cookie.
-func Csrfer(options ...Options) macaron.Handler {
+func Csrfer(options ...Options) func(next http.Handler) http.Handler {
return Generate(options...)
}
+func GetCSRF(injector inject.Injector) *csrf {
+ return injector.GetVal(reflect.TypeOf(&csrf{})).Interface().(*csrf)
+}
+
// Validate should be used as a per route middleware. It attempts to get a token from a "X-CSRFToken"
// HTTP header and then a "_csrf" form value. If one of these is found, the token will be validated
// using ValidToken. If this validation fails, custom Error is sent in the reply.
// If neither a header or form value is found, http.StatusBadRequest is sent.
-func Validate(ctx *macaron.Context, x CSRF) {
- if token := ctx.Req.Header.Get(x.GetHeaderName()); len(token) > 0 {
- if !x.ValidToken(token) {
- ctx.SetCookie(x.GetCookieName(), "", -1, x.GetCookiePath())
- x.Error(ctx.Resp)
- }
- return
- }
- if token := ctx.Req.FormValue(x.GetFormName()); len(token) > 0 {
- if !x.ValidToken(token) {
- ctx.SetCookie(x.GetCookieName(), "", -1, x.GetCookiePath())
- x.Error(ctx.Resp)
- }
- return
- }
+func Validate(x CSRF) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if token := r.Header.Get(x.GetHeaderName()); len(token) > 0 {
+ if !x.ValidToken(token) {
+ cookie := &http.Cookie{
+ Name: x.GetCookieName(),
+ Value: "",
+ Path: x.GetCookiePath(),
+ }
+ http.SetCookie(w, cookie)
+ x.Error(w)
+ return
+ }
+ } else if token := r.FormValue(x.GetFormName()); len(token) > 0 {
+ if !x.ValidToken(token) {
+ cookie := &http.Cookie{
+ Name: x.GetCookieName(),
+ Value: "",
+ Path: x.GetCookiePath(),
+ }
+ http.SetCookie(w, cookie)
+ x.Error(w)
+ return
+ }
+ } else {
+ http.Error(w, "Bad Request: no CSRF token present", http.StatusBadRequest)
+ return
+ }
- http.Error(ctx.Resp, "Bad Request: no CSRF token present", http.StatusBadRequest)
+ next.ServeHTTP(w, r)
+ })
+ }
}
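
For context, a minimal sketch of how the new func(http.Handler) http.Handler signatures might be consumed with plain net/http. The Options field names (Secret, SetHeader) and the default X-CSRFToken header come from the code and comments in this diff; the session store wiring is assumed (Generate still resolves it via session.GetSession(injector), so the go.wandrs.dev session middleware would have to run earlier in the chain), and everything else is illustrative rather than part of this patch.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"go.wandrs.dev/csrf"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/form", func(w http.ResponseWriter, r *http.Request) {
		// With SetHeader enabled, Generate has already added the token to the
		// response headers by the time this handler runs, so it can be read
		// back and rendered for the client to echo on subsequent POSTs.
		fmt.Fprintln(w, "csrf token:", w.Header().Get("X-CSRFToken"))
	})

	// Generate now returns a plain func(http.Handler) http.Handler, so it
	// chains like any net/http middleware instead of a macaron.Handler. A real
	// setup would also install the session middleware ahead of it (assumed here).
	withCSRF := csrf.Generate(csrf.Options{
		Secret:    "change-me", // field names taken from the opt.* references above
		SetHeader: true,
	})

	log.Fatal(http.ListenAndServe(":8080", withCSRF(mux)))
}
```

Validate follows the same shape: it now returns middleware that wraps a handler, with the CSRF value typically retrieved per request through GetCSRF and the injector rather than via macaron's MapTo.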
diff --git a/go.mod b/go.mod
index ff381fc..da4be92 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,12 @@
-module github.com/go-macaron/csrf
+module go.wandrs.dev/csrf
go 1.12
require (
github.com/go-macaron/session v0.0.0-20190805070824-1a3cdc6f5659
- github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337
- github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e
+ github.com/smartystreets/goconvey v1.6.4
+ go.wandrs.dev/binding v0.0.3-0.20210906074014-ef9ff4cd15de
+ go.wandrs.dev/inject v0.0.2-0.20210830111053-4cf53490454a
+ go.wandrs.dev/session v0.0.0-20230714090251-542544d34622
gopkg.in/macaron.v1 v1.3.4
)
diff --git a/go.sum b/go.sum
index 768ed2b..84b5daf 100644
--- a/go.sum
+++ b/go.sum
@@ -1,48 +1,212 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/bradfitz/gomemcache v0.0.0-20230611145640-acc696258285/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
+github.com/couchbase/gomemcached v0.2.1/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo=
github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE=
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw=
+github.com/go-chi/chi/v5 v5.0.3/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
+github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191 h1:NjHlg70DuOkcAMqgt0+XA+NHwtu66MkTVVgR4fFWbcI=
github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191/go.mod h1:VFI2o2q9kYsC4o7VP1HrEVosiZZTd+MVT3YZx4gqvJw=
github.com/go-macaron/session v0.0.0-20190805070824-1a3cdc6f5659 h1:YXDFNK98PgKeBd+xM2Babdd6gyABG8H+SSAh5+hr0os=
github.com/go-macaron/session v0.0.0-20190805070824-1a3cdc6f5659/go.mod h1:tLd0QEudXocQckwcpCq5pCuTCuYc24I0bRJDuRe9OuQ=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/form/v4 v4.1.3 h1:SMUgkH+XBQkssHylgYzmy2VV4r37/pBYHgQnyqeBmmM=
+github.com/go-playground/form/v4 v4.1.3/go.mod h1:q1a2BY+AQUUzhl6xA/6hBetay6dEIhMHjgvJiGo6K7U=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.6.1 h1:W6TRDXt4WcWp4c4nf/G+6BkGdhiIo0k417gfr+V6u4I=
+github.com/go-playground/validator/v10 v10.6.1/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de/go.mod h1:3q8WtuPQsoRbatJuy3nvq/hRSvuBJrHHr+ybPPiNvHQ=
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VObJ7peltM+2n3PWOz7yTrfUuGbVFkzN0=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d/go.mod h1:vq0tzqLRu6TS7Id0wMo2N5QzJoKedVeovOpHjnykSzY=
+github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -50,46 +214,237 @@ github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
-github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM=
+github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
+github.com/unknwon/com v1.0.1 h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=
+github.com/unknwon/com v1.0.1/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
+github.com/unrolled/render v1.4.0/go.mod h1:cK4RSTTVdND5j9EYEc0LAMOvdG11JeiKjyjfyZRvV2w=
+github.com/unrolled/render v1.5.0 h1:uNTHMvVoI9pyyXfgoDHHycIqFONNY2p4eQR9ty+NsxM=
+github.com/unrolled/render v1.5.0/go.mod h1:eLTosBkQqEPEk7pRfkCRApXd++lm++nCsVlFOHpeedw=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
+go.wandrs.dev/binding v0.0.3-0.20210906074014-ef9ff4cd15de h1:BDHsi0PWPAOQ4io+Ud9Mm4TtE5LaR553+RPTZ4Sp1EQ=
+go.wandrs.dev/binding v0.0.3-0.20210906074014-ef9ff4cd15de/go.mod h1:PKK3IdnYR5gQ4titLhtwobmn/j5wums4tTs20M9YFK4=
+go.wandrs.dev/http v0.0.2-0.20210906061900-da89ae48dd5b/go.mod h1:yukEsN4kwCfNBl3l7EmW/GdBlowAEoDaTmwkHc0C9Tk=
+go.wandrs.dev/http v0.0.4-0.20230711115341-e39ae8d39a48 h1:SB1j/H8sqFZfG82zIVVX6xIYd/CcKzJW5GuNEn5rp38=
+go.wandrs.dev/http v0.0.4-0.20230711115341-e39ae8d39a48/go.mod h1:lMGsS4+SlAvcu8erdnKddtBju//UnJ4IVxbezE4e3+g=
+go.wandrs.dev/inject v0.0.2-0.20210830111053-4cf53490454a h1:2rNrwd8Fh+QmKNYMZNaw1qIf5rOt2RfDJhiDpacg2lI=
+go.wandrs.dev/inject v0.0.2-0.20210830111053-4cf53490454a/go.mod h1:ZN2TWSS270vSn0ci86WBgyvXO4WFooiU8Qyvxz28l2k=
+go.wandrs.dev/session v0.0.0-20230714090251-542544d34622 h1:xwguTfjWP6jo62Enlil6TuxiBaqyOjHFSmChVCOu45A=
+go.wandrs.dev/session v0.0.0-20230714090251-542544d34622/go.mod h1:mCZ/apz3lJhuyMLzSdqZ9WmriKq0o6BFAHHJHT9Yopk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190802220118-1d1727260058/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e/go.mod h1:xsQCaysVCudhrYTfzYWe577fCe7Ceci+6qjO2Rdc0Z4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/macaron.v1 v1.3.4 h1:HvIscOwxhFhx3swWM/979wh2QMYyuXrNmrF9l+j3HZs=
gopkg.in/macaron.v1 v1.3.4/go.mod h1:/RoHTdC8ALpyJ3+QR36mKjwnT1F1dyYtsGM9Ate6ZFI=
+gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/redis.v2 v2.3.2/go.mod h1:4wl9PJ/CqzeHk3LVq1hNLHH8krm3+AXEgut4jVc++LU=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
+k8s.io/apimachinery v0.25.1 h1:t0XrnmCEHVgJlR2arwO8Awp9ylluDic706WePaYCBTI=
+k8s.io/apimachinery v0.25.1/go.mod h1:hqqA1X0bsgsxI6dXsJ4HnNTBOmJNxyPp8dw3u2fSHwA=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
+k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
+k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig
new file mode 100644
index 0000000..fad8958
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*.go]
+indent_style = tab
+indent_size = 4
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_style = space
+indent_size = 2
+insert_final_newline = true
+trim_trailing_whitespace = true
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes
new file mode 100644
index 0000000..32f1001
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitattributes
@@ -0,0 +1 @@
+go.sum linguist-generated
diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore
new file mode 100644
index 0000000..4cd0cba
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.gitignore
@@ -0,0 +1,6 @@
+# Setup a Global .gitignore for OS and editor generated files:
+# https://help.github.com/articles/ignoring-files
+# git config --global core.excludesfile ~/.gitignore_global
+
+.vagrant
+*.sublime-project
diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap
new file mode 100644
index 0000000..a04f290
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/.mailmap
@@ -0,0 +1,2 @@
+Chris Howey
+Nathan Youngman <4566+nathany@users.noreply.github.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000..6cbabe5
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,62 @@
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
+
+# Please keep the list sorted.
+
+Aaron L
+Adrien Bustany
+Alexey Kazakov
+Amit Krishnan
+Anmol Sethi
+Bjørn Erik Pedersen
+Brian Goff
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Eric Lin
+Evan Phoenix
+Francisco Souza
+Gautam Dey
+Hari haran
+Ichinose Shogo
+Johannes Ebke
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Matthias Stone
+Nathan Youngman
+Nickolai Zeldovich
+Oliver Bristow
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pratik Shinde
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Rodrigo Chiossi
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Tobias Klauser
+Tom Payne
+Travis Cline
+Tudor Golubenco
+Vahe Khachikyan
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000..cc01c08
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,357 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.5.4] - 2022-04-25
+
+* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
+* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
+* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
+
+## [1.5.3] - 2022-04-22
+
+* This version is retracted. An incorrect branch was accidentally published [#445](https://github.com/fsnotify/fsnotify/issues/445)
+
+## [1.5.2] - 2022-04-21
+
+* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
+* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
+* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
+* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
+* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
+
+## [1.5.1] - 2021-08-24
+
+* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
+
+## [1.5.0] - 2021-08-20
+
+* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
+* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
+* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
+* CI: Use GitHub Actions for CI and cover go 1.12-1.17
+ [#378](https://github.com/fsnotify/fsnotify/pull/378)
+ [#381](https://github.com/fsnotify/fsnotify/pull/381)
+ [#385](https://github.com/fsnotify/fsnotify/pull/385)
+* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
+
+## [1.4.7] - 2018-01-09
+
+* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
+* Tests: Fix missing verb on format string (thanks @rchiossi)
+* Linux: Fix deadlock in Remove (thanks @aarondl)
+* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
+* Docs: Moved FAQ into the README (thanks @vahe)
+* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
+* Docs: replace references to OS X with macOS
+
+## [1.4.2] - 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## [1.4.1] - 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## [1.4.0] - 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## [1.3.1] - 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## [1.3.0] - 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## [1.2.10] - 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## [1.2.9] - 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## [1.2.8] - 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## [1.2.5] - 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## [1.2.1] - 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## [1.2.0] - 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## [1.1.1] - 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## [1.1.0] - 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+ * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [1.0.4] - 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## [1.0.3] - 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## [1.0.2] - 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## [1.0.0] - 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## [0.9.3] - 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## [0.9.2] - 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## [0.9.1] - 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## [0.9.0] - 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## [0.8.12] - 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## [0.8.11] - 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## [0.8.10] - 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## [0.8.9] - 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## [0.8.8] - 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## [0.8.7] - 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## [0.8.6] - 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## [0.8.5] - 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## [0.8.4] - 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## [0.8.3] - 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## [0.8.2] - 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## [0.8.1] - 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## [0.8.0] - 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## [0.7.4] - 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## [0.7.3] - 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## [0.7.2] - 2012-09-01
+
+* kqueue: events for created directories
+
+## [0.7.1] - 2012-07-14
+
+* [Fix] for renaming files
+
+## [0.7.0] - 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## [0.6.0] - 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## [0.5.1] - 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## [0.5.0] - 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## [0.4.0] - 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## [0.3.0] - 2012-02-19
+
+* kqueue: add files when watching a directory
+
+## [0.2.0] - 2011-12-30
+
+* update to latest Go weekly code
+
+## [0.1.0] - 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000..8a64256
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000..e180c8f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000..0731c5e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,120 @@
+# File system notifications for Go
+
+[Go Reference](https://pkg.go.dev/github.com/fsnotify/fsnotify) | [Go Report Card](https://goreportcard.com/report/github.com/fsnotify/fsnotify) | [Maintainers wanted](https://github.com/fsnotify/fsnotify/issues/413)
+
+fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+| Adapter | OS | Status |
+| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
+| inotify | Linux 2.6.27 or later, Android\* | Supported |
+| kqueue | BSD, macOS, iOS\* | Supported |
+| ReadDirectoryChangesW | Windows | Supported |
+| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
+| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
+| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
+| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
+| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+func main() {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer watcher.Close()
+
+ done := make(chan bool)
+ go func() {
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ log.Println("event:", event)
+ if event.Op&fsnotify.Write == fsnotify.Write {
+ log.Println("modified file:", event.Name)
+ }
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Println("error:", err)
+ }
+ }
+ }()
+
+ err = watcher.Add("/tmp/foo")
+ if err != nil {
+ log.Fatal(err)
+ }
+ <-done
+}
+```
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
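+
+Since watches are not recursive, callers typically walk the tree and add each directory themselves. Below is a minimal sketch of that approach (`addRecursive` is a hypothetical helper, not part of the fsnotify API); it assumes a `*fsnotify.Watcher` plus the `os` and `path/filepath` imports, and newly created subdirectories still have to be added when their Create events arrive:
+
+```go
+// addRecursive adds a watch for root and for every directory below it.
+func addRecursive(watcher *fsnotify.Watcher, root string) error {
+    return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+        if err != nil {
+            return err
+        }
+        if info.IsDir() {
+            return watcher.Add(path)
+        }
+        return nil
+    })
+}
+```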
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: `/proc/sys/fs/inotify/max_user_watches` contains the limit; reaching this limit results in a "no space left on device" error (see the sketch below).
+* BSD / macOS: the sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` apply; reaching these limits results in a "too many open files" error.
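+
+A minimal sketch (Linux only, and not part of fsnotify itself) for checking the current inotify watch limit from Go; it assumes the `log`, `os`, and `strings` imports:
+
+```go
+// Read the per-user inotify watch limit from procfs.
+data, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
+if err != nil {
+    log.Fatal(err)
+}
+log.Printf("max_user_watches: %s", strings.TrimSpace(string(data)))
+```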
+
+**Why don't notifications work with NFS filesystems or filesystems in userspace (FUSE)?**
+
+fsnotify requires support from the underlying OS to work. The current NFS protocol does not provide network-level support for file notifications.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000..b3ac3d8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,38 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build solaris
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000..0f4ee52
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,69 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !plan9
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var (
+ ErrEventOverflow = errors.New("fsnotify queue overflow")
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
new file mode 100644
index 0000000..5968855
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
+
+package fsnotify
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct{}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/go.mod b/vendor/github.com/fsnotify/fsnotify/go.mod
new file mode 100644
index 0000000..48cfd07
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.mod
@@ -0,0 +1,10 @@
+module github.com/fsnotify/fsnotify
+
+go 1.16
+
+require golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
+
+retract (
+ v1.5.3 // Published an incorrect branch accidentally https://github.com/fsnotify/fsnotify/issues/445
+ v1.5.0 // Contains symlink regression https://github.com/fsnotify/fsnotify/pull/394
+)
diff --git a/vendor/github.com/fsnotify/fsnotify/go.sum b/vendor/github.com/fsnotify/fsnotify/go.sum
new file mode 100644
index 0000000..7f2d82d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000..a6d0e0e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,351 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+ // error; we need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
+ // inotify_rm_watch will return EINVAL if the file has been deleted;
+ // the inotify watch will already have been removed.
+ // watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+ // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORED
+ // so that EINVAL means that the wd is being rm_watch()ed or its file removed
+ // by another thread and we have not received the IN_IGNORED event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+ return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+ // EOF was received; this should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+ // If an error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+ // If the event happened to the watched directory or the watched file, the kernel
+ // doesn't append the filename to the event, but we would like to always fill the
+ // the "Name" field with a valid filename. We retrieve the path of the watch from
+ // the "paths" map.
+ w.mu.Lock()
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
+ w.mu.Unlock()
+
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000..b572a37
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // Write pipe descriptor was closed, by us. This means we're closing down the
+ // watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+ return false, errors.New("Error on the pipe descriptor.")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// wake writes a byte to the pipe to wake up the epoll poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000..6fb8d85
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,535 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+
+ // copy paths to remove while locked
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ for _, name := range pathsToRemove {
+ w.Remove(name)
+ }
+
+ // send a "quit" message to the reader goroutine
+ close(w.done)
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for pathname := range w.watches {
+ entries = append(entries, pathname)
+ }
+
+ return entries
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+loop:
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ break loop
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ break loop
+ }
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double check to make sure the directory exists. This can happen when
+ // we do a rm -fr on recursively watched folders and we receive a
+ // modification event first, but the folder has been deleted and we
+ // receive the delete event later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel.
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ break loop
+ }
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing; ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+
+ // cleanup
+ err := unix.Close(w.kq)
+ if err != nil {
+ // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors.
+ select {
+ case w.Errors <- err:
+ default:
+ }
+ }
+ close(w.Events)
+ close(w.Errors)
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ select {
+ case w.Events <- newCreateEvent(filePath):
+ case <-w.done:
+ return
+ }
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 0000000..36cc384
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd || openbsd || netbsd || dragonfly
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000..98cd847
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000..02ce7de
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,586 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
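+
+// Typical usage of this Watcher, sketched as a comment (error handling elided;
+// the watched path is hypothetical):
+//
+//	w, _ := NewWatcher()
+//	defer w.Close()
+//	_ = w.Add(`C:\some\dir`)
+//	for {
+//		select {
+//		case ev := <-w.Events:
+//			fmt.Println(ev)
+//		case err := <-w.Errors:
+//			fmt.Println(err)
+//		}
+//	}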
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// WatchList returns the directories and files that are being monitored.
+func (w *Watcher) WatchList() []string {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ entries := make([]string, 0, len(w.watches))
+ for _, entry := range w.watches {
+ for _, watchEntry := range entry {
+ entries = append(entries, watchEntry.path)
+ }
+ }
+
+ return entries
+}
+
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionStatus", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ // TODO: Consider using unsafe.Slice that is available from go1.17
+ // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
+ // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
+ size := int(raw.FileNameLength / 2)
+ var buf []uint16
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+ sh.Len = size
+ sh.Cap = size
+ name := syscall.UTF16ToString(buf)
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/go-chi/chi/v5/.gitignore b/vendor/github.com/go-chi/chi/v5/.gitignore
new file mode 100644
index 0000000..ba22c99
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/.gitignore
@@ -0,0 +1,3 @@
+.idea
+*.sw?
+.vscode
diff --git a/vendor/github.com/go-chi/chi/v5/CHANGELOG.md b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md
new file mode 100644
index 0000000..a1feeec
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/CHANGELOG.md
@@ -0,0 +1,320 @@
+# Changelog
+
+## v5.0.8 (2022-12-07)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.7...v5.0.8
+
+
+## v5.0.7 (2021-11-18)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.6...v5.0.7
+
+
+## v5.0.6 (2021-11-15)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.5...v5.0.6
+
+
+## v5.0.5 (2021-10-27)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.4...v5.0.5
+
+
+## v5.0.4 (2021-08-29)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.3...v5.0.4
+
+
+## v5.0.3 (2021-04-29)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.2...v5.0.3
+
+
+## v5.0.2 (2021-03-25)
+
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.1...v5.0.2
+
+
+## v5.0.1 (2021-03-10)
+
+- Small improvements
+- History of changes: see https://github.com/go-chi/chi/compare/v5.0.0...v5.0.1
+
+
+## v5.0.0 (2021-02-27)
+
+- chi v5, `github.com/go-chi/chi/v5`, introduces the adoption of Go's SIV (Semantic Import Versioning) to adhere to the current state of the tools in Go.
+- chi v1.5.x did not work out as planned, as the Go tooling is too powerful and chi's adoption is too wide.
+ The most responsible thing to do for everyone's benefit is to just release v5 with SIV, so I present to you all,
+ chi v5 at `github.com/go-chi/chi/v5`. I hope someday the developer experience and ergonomics I've been seeking
+ will still come to fruition in some form, see https://github.com/golang/go/issues/44550
+- History of changes: see https://github.com/go-chi/chi/compare/v1.5.4...v5.0.0
+
+
+## v1.5.4 (2021-02-27)
+
+- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release
+- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4
+
+
+## v1.5.3 (2021-02-21)
+
+- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support
+- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3
+
+
+## v1.5.2 (2021-02-10)
+
+- Reverting allocation optimization as a precaution as go test -race fails.
+- Minor improvements, see history below
+- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2
+
+
+## v1.5.1 (2020-12-06)
+
+- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for
+ your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README.
+- `middleware.CleanPath`: new middleware that cleans the request path of double slashes
+- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext`
+- plus other tiny improvements, see full commit history below
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1
+
+
+## v1.5.0 (2020-11-12) - now with go.mod support
+
+`chi` dates back to 2016 with its original implementation as one of the first routers to adopt the newly introduced
+context.Context api in the stdlib -- set out to design a router that is faster, more modular and simpler than anything
+else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies,
+and in many ways is future-proofed from changes, given its minimal nature. Between versions, chi's iterations have been very
+incremental, with the architecture and api being the same today as when it was originally designed in 2016. This makes chi
+a pretty easy project to maintain, thanks as well to the many amazing community contributors over the years who all help
+make chi better (86 contributors in total to date -- thanks all!).
+
+Chi has been a labour of love, art and engineering, with the goal of offering beautiful ergonomics, flexibility, performance
+and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size,
+and to always improve the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting
+middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges with
+companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of
+joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :)
+
+For me, the aesthetics of chi's code and usage are very important. With the introduction of Go's module support
+(which I'm a big fan of), chi's past versioning scheme of v2, v3 and v4 would mean requiring the import path
+"github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462.
+Haha, some of you may be scratching your heads at why I've spent > 1 year stalling to adopt the "/vXX" convention in the import
+path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in its API design,
+aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6",
+and upgrading between versions in the future will also be just incremental.
+
+I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing",
+as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and
+is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy,
+while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of
+v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's
+largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod.
+However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just
+`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains
+go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago.
+Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and
+backwards-compatible improvements/fixes will bump a "tiny" release.
+
+For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`,
+which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run
+`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+
+built with go.mod support.
+
+My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very
+minor request which is backwards compatible and won't break your existing installations.
+
+Cheers all, happy coding!
+
+
+---
+
+
+## v4.1.2 (2020-06-02)
+
+- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
+- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
+
+
+## v4.1.1 (2020-04-16)
+
+- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
+ route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
+- new middleware.RouteHeaders as a simple router for request headers with wildcard support
+- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
+
+
+## v4.1.0 (2020-04-01)
+
+- middleware.LogEntry: Write method on interface now passes the response header
+ and an extra interface type useful for custom logger implementations.
+- middleware.WrapResponseWriter: minor fix
+- middleware.Recoverer: a bit prettier
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
+
+## v4.0.4 (2020-03-24)
+
+- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
+- a few minor improvements and fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
+
+
+## v4.0.3 (2020-01-09)
+
+- core: fix regexp routing to include default value when param is not matched
+- middleware: rewrite of middleware.Compress
+- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
+
+
+## v4.0.2 (2019-02-26)
+
+- Minor fixes
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
+
+
+## v4.0.1 (2019-01-21)
+
+- Fixes issue with compress middleware: #382 #385
+- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
+
+
+## v4.0.0 (2019-01-10)
+
+- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
+- router: respond with 404 on router with no routes (#362)
+- router: additional check to ensure wildcard is at the end of a url pattern (#333)
+- middleware: deprecate use of http.CloseNotifier (#347)
+- middleware: fix RedirectSlashes to include query params on redirect (#334)
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
+
+
+## v3.3.4 (2019-01-07)
+
+- Minor middleware improvements. No changes to core library/router. Moving v3 into its
+  own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
+- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
+
+
+## v3.3.3 (2018-08-27)
+
+- Minor release
+- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
+
+
+## v3.3.2 (2017-12-22)
+
+- Support to route trailing slashes on mounted sub-routers (#281)
+- middleware: new `ContentCharset` to check matching charsets. Thank you
+ @csucu for your community contribution!
+
+
+## v3.3.1 (2017-11-20)
+
+- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
+- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
+- Minor bug fixes
+
+
+## v3.3.0 (2017-10-10)
+
+- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
+- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
+
+
+## v3.2.1 (2017-08-31)
+
+- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
+ and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
+- Add new `RouteMethod` to `*Context`
+- Add new `Routes` pointer to `*Context`
+- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
+- Updated benchmarks (see README)
+
+
+## v3.1.5 (2017-08-02)
+
+- Setup golint and go vet for the project
+- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
+ to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
+
+
+## v3.1.0 (2017-07-10)
+
+- Fix a few minor issues after v3 release
+- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
+- Move `render` sub-pkg to https://github.com/go-chi/render
+- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
+ suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
+ https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
+
+
+## v3.0.0 (2017-06-21)
+
+- Major update to chi library with many exciting updates, but also some *breaking changes*
+- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
+ `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
+ same router
+- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
+ `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
+- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
+ `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
+ in `_examples/custom-handler`
+- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
+ own using file handler with the stdlib, see `_examples/fileserver` for an example
+- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
+- Moved the chi project to its own organization, to allow chi-related community packages to
+ be easily discovered and supported, at: https://github.com/go-chi
+- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
+- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
+
+
+## v2.1.0 (2017-03-30)
+
+- Minor improvements and update to the chi core library
+- Introduced a brand new `chi/render` sub-package to complete the story of building
+ APIs to offer a pattern for managing well-defined request / response payloads. Please
+ check out the updated `_examples/rest` example for how it works.
+- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
+
+
+## v2.0.0 (2017-01-06)
+
+- After many months of v2 being in an RC state with many companies and users running it in
+ production, the inclusion of some improvements to the middlewares, we are very pleased to
+ announce v2.0.0 of chi.
+
+
+## v2.0.0-rc1 (2016-07-26)
+
+- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
+  community `"net/context"` package has been included in the standard library as `"context"` and
+  utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
+  request-scoped values. We're very excited about the new context addition and are proud to
+  introduce chi v2, a minimal and powerful routing package for building large HTTP services,
+  with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
+  stdlib HTTP handlers and middlewares.
+- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
+- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
+- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
+ which provides direct access to URL routing parameters, the routing path and the matching
+ routing patterns.
+- Users upgrading from chi v1 to v2, need to:
+ 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
+ the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
+ 2. Use `chi.URLParam(r *http.Request, paramKey string) string`
+ or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
+
+
+## v1.0.0 (2016-07-01)
+
+- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
+
+
+## v0.9.0 (2016-03-31)
+
+- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
+- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
+ has changed to: `chi.URLParam(ctx, "id")`
diff --git a/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md
new file mode 100644
index 0000000..c0ac2df
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing
+
+## Prerequisites
+
+1. [Install Go][go-install].
+2. Download the sources and switch the working directory:
+
+ ```bash
+ go get -u -d github.com/go-chi/chi
+ cd $GOPATH/src/github.com/go-chi/chi
+ ```
+
+## Submitting a Pull Request
+
+A typical workflow is:
+
+1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip]
+2. [Create a topic branch.][branch]
+3. Add tests for your change.
+4. Run `go test`. If your tests pass, return to step 3 -- they should fail until the change is implemented.
+5. Implement the change and ensure the tests from the previous step now pass.
+6. Run `goimports -w .` to ensure the new code conforms to Go formatting guidelines.
+7. [Add, commit and push your changes.][git-help]
+8. [Submit a pull request.][pull-req]
+
+[go-install]: https://golang.org/doc/install
+[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
+[fork]: https://help.github.com/articles/fork-a-repo
+[branch]: http://learn.github.com/p/branching.html
+[git-help]: https://guides.github.com
+[pull-req]: https://help.github.com/articles/using-pull-requests
diff --git a/vendor/github.com/go-chi/chi/v5/LICENSE b/vendor/github.com/go-chi/chi/v5/LICENSE
new file mode 100644
index 0000000..d99f02f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-chi/chi/v5/Makefile b/vendor/github.com/go-chi/chi/v5/Makefile
new file mode 100644
index 0000000..e0f18c7
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/Makefile
@@ -0,0 +1,22 @@
+.PHONY: all
+all:
+ @echo "**********************************************************"
+ @echo "** chi build tool **"
+ @echo "**********************************************************"
+
+
+.PHONY: test
+test:
+ go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware
+
+.PHONY: test-router
+test-router:
+ go test -race -v .
+
+.PHONY: test-middleware
+test-middleware:
+ go test -race -v ./middleware
+
+.PHONY: docs
+docs:
+ npx docsify-cli serve ./docs
diff --git a/vendor/github.com/go-chi/chi/v5/README.md b/vendor/github.com/go-chi/chi/v5/README.md
new file mode 100644
index 0000000..3e4cc4a
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/README.md
@@ -0,0 +1,500 @@
+# chi
+
+
+[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
+
+`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
+especially good at helping you write large REST API services that are kept maintainable as your
+project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
+handle signaling, cancelation and request-scoped values across a handler chain.
+
+The focus of the project has been to seek out an elegant and comfortable design for writing
+REST API servers, written during the development of the Pressly API service that powers our
+public API service, which in turn powers all of our client-side applications.
+
+The key considerations of chi's design are: project structure, maintainability, standard http
+handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
+parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
+included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render)
+and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
+
+## Install
+
+`go get -u github.com/go-chi/chi/v5`
+
+
+## Features
+
+* **Lightweight** - cloc'd in ~1000 LOC for the chi router
+* **Fast** - yes, see [benchmarks](#benchmarks)
+* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
+* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting
+* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
+* **Robust** - in production at Pressly, Cloudflare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
+* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
+* **Go.mod support** - as of v5, go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md))
+* **No external dependencies** - plain ol' Go stdlib + net/http
+
+
+## Examples
+
+See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
+
+
+**As easy as:**
+
+```go
+package main
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/go-chi/chi/v5/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+ r.Use(middleware.Logger)
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("welcome"))
+ })
+ http.ListenAndServe(":3000", r)
+}
+```
+
+**REST Preview:**
+
+Here is a little preview of how routing looks with chi. Also take a look at the generated routing docs
+in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
+Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
+
+I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
+above; they will show you all the features of chi and serve as a good form of documentation.
+
+```go
+import (
+ //...
+ "context"
+ "github.com/go-chi/chi/v5"
+ "github.com/go-chi/chi/v5/middleware"
+)
+
+func main() {
+ r := chi.NewRouter()
+
+ // A good base middleware stack
+ r.Use(middleware.RequestID)
+ r.Use(middleware.RealIP)
+ r.Use(middleware.Logger)
+ r.Use(middleware.Recoverer)
+
+ // Set a timeout value on the request context (ctx), that will signal
+ // through ctx.Done() that the request has timed out and further
+ // processing should be stopped.
+ r.Use(middleware.Timeout(60 * time.Second))
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hi"))
+ })
+
+ // RESTy routes for "articles" resource
+ r.Route("/articles", func(r chi.Router) {
+ r.With(paginate).Get("/", listArticles) // GET /articles
+ r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
+
+ r.Post("/", createArticle) // POST /articles
+ r.Get("/search", searchArticles) // GET /articles/search
+
+ // Regexp url parameters:
+ r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
+
+ // Subrouters:
+ r.Route("/{articleID}", func(r chi.Router) {
+ r.Use(ArticleCtx)
+ r.Get("/", getArticle) // GET /articles/123
+ r.Put("/", updateArticle) // PUT /articles/123
+ r.Delete("/", deleteArticle) // DELETE /articles/123
+ })
+ })
+
+ // Mount the admin sub-router
+ r.Mount("/admin", adminRouter())
+
+ http.ListenAndServe(":3333", r)
+}
+
+func ArticleCtx(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ articleID := chi.URLParam(r, "articleID")
+ article, err := dbGetArticle(articleID)
+ if err != nil {
+ http.Error(w, http.StatusText(404), 404)
+ return
+ }
+ ctx := context.WithValue(r.Context(), "article", article)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+
+func getArticle(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ article, ok := ctx.Value("article").(*Article)
+ if !ok {
+ http.Error(w, http.StatusText(422), 422)
+ return
+ }
+ w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
+}
+
+// A completely separate router for administrator routes
+func adminRouter() http.Handler {
+ r := chi.NewRouter()
+ r.Use(AdminOnly)
+ r.Get("/", adminIndex)
+ r.Get("/accounts", adminListAccounts)
+ return r
+}
+
+func AdminOnly(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ perm, ok := ctx.Value("acl.permission").(YourPermissionType)
+ if !ok || !perm.IsAdmin() {
+ http.Error(w, http.StatusText(403), 403)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+
+## Router interface
+
+chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
+The router is fully compatible with `net/http`.
+
+Built on top of the tree is the `Router` interface:
+
+```go
+// Router consisting of the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+ // Handle and HandleFunc adds routes for `pattern` that matches
+ // all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+ // Method and MethodFunc adds routes for `pattern` that matches
+ // the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds two methods for router traversal, which is also
+// used by the github.com/go-chi/docgen package to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+```
+
+Each routing method accepts a URL `pattern` and a chain of `handlers`. The URL pattern
+supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
+can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
+and `chi.URLParam(r, "*")` for a wildcard parameter.
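+
+A minimal sketch of both forms (the handler bodies are illustrative only):
+
+```go
+r := chi.NewRouter()
+
+// Named parameter: GET /users/123 -> chi.URLParam(r, "userID") == "123"
+r.Get("/users/{userID}", func(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte(chi.URLParam(r, "userID")))
+})
+
+// Wildcard: GET /static/css/site.css -> chi.URLParam(r, "*") == "css/site.css"
+r.Get("/static/*", func(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte(chi.URLParam(r, "*")))
+})
+```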
+
+
+### Middleware handlers
+
+chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
+about them, which means the router and all the tooling is designed to be compatible and
+friendly with any middleware in the community. This offers much better extensibility and reuse
+of packages and is at the heart of chi's purpose.
+
+Here is an example of a standard net/http middleware where we assign a context key `"user"`
+the value of `"123"`. This middleware sets a hypothetical user identifier on the request
+context and calls the next handler in the chain.
+
+```go
+// HTTP middleware setting a value on the request context
+func MyMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // create new context from `r` request context, and assign key `"user"`
+ // to value of `"123"`
+ ctx := context.WithValue(r.Context(), "user", "123")
+
+ // call the next handler in the chain, passing the response writer and
+ // the updated request object with the new context value.
+ //
+ // note: context.Context values are nested, so any previously set
+ // values will be accessible as well, and the new `"user"` key
+ // will be accessible from this point forward.
+ next.ServeHTTP(w, r.WithContext(ctx))
+ })
+}
+```
+
+
+### Request handlers
+
+chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
+func that reads a user identifier from the request context - hypothetically, identifying
+the user sending an authenticated request, validated+set by a previous middleware handler.
+
+```go
+// HTTP handler accessing data from the request context.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ // here we read from the request context and fetch out `"user"` key set in
+ // the MyMiddleware example above.
+ user := r.Context().Value("user").(string)
+
+ // respond to the client
+ w.Write([]byte(fmt.Sprintf("hi %s", user)))
+}
+```
+
+
+### URL parameters
+
+chi's router parses and stores URL parameters right onto the request context. Here is
+an example of how to access URL params in your net/http handlers. And of course, middlewares
+are able to access the same information.
+
+```go
+// HTTP handler accessing the url routing parameters.
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
+ // fetch the url parameter `"userID"` from the request of a matching
+ // routing pattern. An example routing pattern could be: /users/{userID}
+ userID := chi.URLParam(r, "userID")
+
+ // fetch `"key"` from the request context
+ ctx := r.Context()
+ key := ctx.Value("key").(string)
+
+ // respond to the client
+ w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
+}
+```
+
+
+## Middlewares
+
+chi comes equipped with an optional `middleware` package, providing a suite of standard
+`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
+with `net/http` can be used with chi's mux.
+
+### Core middlewares
+
+----------------------------------------------------------------------------------------------------
+| chi/middleware Handler | description |
+| :--------------------- | :---------------------------------------------------------------------- |
+| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers |
+| [AllowContentType] | Explicit whitelist of accepted request Content-Types |
+| [BasicAuth] | Basic HTTP authentication |
+| [Compress] | Gzip compression for clients that accept compressed responses |
+| [ContentCharset] | Ensure charset for Content-Type request headers |
+| [CleanPath] | Clean double slashes from request path |
+| [GetHead] | Automatically route undefined HEAD requests to GET handlers |
+| [Heartbeat]            | Monitoring endpoint to check the server's pulse                         |
+| [Logger] | Logs the start and end of each request with the elapsed processing time |
+| [NoCache] | Sets response headers to prevent clients from caching |
+| [Profiler] | Easily attach net/http/pprof to your routers |
+| [RealIP] | Sets a http.Request's RemoteAddr to either X-Real-IP or X-Forwarded-For |
+| [Recoverer]            | Gracefully absorbs panics and prints the stack trace                    |
+| [RequestID] | Injects a request ID into the context of each request |
+| [RedirectSlashes] | Redirect slashes on routing paths |
+| [RouteHeaders] | Route handling for request headers |
+| [SetHeader] | Short-hand middleware to set a response header key/value |
+| [StripSlashes] | Strip slashes on routing paths |
+| [Throttle] | Puts a ceiling on the number of concurrent requests |
+| [Timeout] | Signals to the request context when the timeout deadline is reached |
+| [URLFormat] | Parse extension from url and put it on request context |
+| [WithValue] | Short-hand middleware to set a key/value on the request context |
+----------------------------------------------------------------------------------------------------
+
+[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding
+[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType
+[BasicAuth]: https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth
+[Compress]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress
+[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset
+[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath
+[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead
+[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID
+[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat
+[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger
+[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache
+[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler
+[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP
+[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer
+[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes
+[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger
+[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID
+[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders
+[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader
+[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes
+[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle
+[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog
+[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts
+[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout
+[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat
+[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry
+[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue
+[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor
+[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter
+[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc
+[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute
+[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter
+[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry
+[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter
+[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface
+[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts
+[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter
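+
+As a quick sketch (the values below are illustrative, not recommendations), several of the middlewares above can be stacked on one router:
+
+```go
+r := chi.NewRouter()
+r.Use(middleware.RequestID)
+r.Use(middleware.RealIP)
+r.Use(middleware.Heartbeat("/ping"))        // respond to GET /ping for liveness checks
+r.Use(middleware.Throttle(100))             // cap concurrent in-flight requests at 100
+r.Use(middleware.Timeout(30 * time.Second)) // signal the request context after 30s
+r.Use(middleware.NoCache)                   // set headers that prevent client caching
+```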
+
+### Extra middlewares & packages
+
+Please see https://github.com/go-chi for additional packages.
+
+--------------------------------------------------------------------------------------------------------------------
+| package | description |
+|:---------------------------------------------------|:-------------------------------------------------------------
+| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
+| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
+| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
+| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
+| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
+| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
+| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
+| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
+| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
+--------------------------------------------------------------------------------------------------------------------
+
+
+## context?
+
+`context` is a tiny pkg that provides a simple interface to signal context across call stacks
+and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
+and is available in stdlib since go1.7.
+
+Learn more at https://blog.golang.org/context
+
+and..
+* Docs: https://golang.org/pkg/context
+* Source: https://github.com/golang/go/tree/master/src/context
+
+
+## Benchmarks
+
+The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
+
+Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x
+
+```shell
+BenchmarkChi_Param 3075895 384 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_Param5 2116603 566 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_Param20 964117 1227 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_ParamWrite 2863413 420 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GithubStatic 3045488 395 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GithubParam 2204115 540 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GithubAll 10000 113811 ns/op 81203 B/op 406 allocs/op
+BenchmarkChi_GPlusStatic 3337485 359 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GPlusParam 2825853 423 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GPlus2Params 2471697 483 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_GPlusAll 194220 5950 ns/op 5200 B/op 26 allocs/op
+BenchmarkChi_ParseStatic 3365324 356 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_ParseParam 2976614 404 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_Parse2Params 2638084 439 ns/op 400 B/op 2 allocs/op
+BenchmarkChi_ParseAll 109567 11295 ns/op 10400 B/op 52 allocs/op
+BenchmarkChi_StaticAll 16846 71308 ns/op 62802 B/op 314 allocs/op
+```
+
+Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
+
+NOTE: the allocs in the benchmark above are from the calls to http.Request's
+`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
+on the duplicated (alloc'd) request and returns the new request object. This is just
+how setting context on a request in Go works.
+
+
+## Credits
+
+* Carl Jackson for https://github.com/zenazn/goji
+ * Parts of chi's thinking comes from goji, and chi's middleware package
+ sources from goji.
+* Armon Dadgar for https://github.com/armon/go-radix
+* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
+
+We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
+
+
+## Beyond REST
+
+chi is just a http router that lets you decompose request handling into many smaller layers.
+Many companies use chi to write REST services for their public APIs. But, REST is just a convention
+for managing state via HTTP, and there are a lot of other pieces required to write a complete client-server
+system or network of microservices.
+
+Looking beyond REST, I also recommend some newer works in the field:
+* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
+* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
+* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
+* [NATS](https://nats.io) - lightweight pub-sub
+
+
+## License
+
+Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
+
+Licensed under [MIT License](./LICENSE)
+
+[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi?tab=versions
+[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
+[Travis]: https://travis-ci.org/go-chi/chi
+[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master
diff --git a/vendor/github.com/go-chi/chi/v5/chain.go b/vendor/github.com/go-chi/chi/v5/chain.go
new file mode 100644
index 0000000..a227841
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/chain.go
@@ -0,0 +1,49 @@
+package chi
+
+import "net/http"
+
+// Chain returns a Middlewares type from a slice of middleware handlers.
+func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
+ return Middlewares(middlewares)
+}
+
+// Handler builds and returns a http.Handler from the chain of middlewares,
+// with `h http.Handler` as the final handler.
+func (mws Middlewares) Handler(h http.Handler) http.Handler {
+ return &ChainHandler{h, chain(mws, h), mws}
+}
+
+// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
+// with `h http.HandlerFunc` as the final handler.
+func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
+ return &ChainHandler{h, chain(mws, h), mws}
+}
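+
+// Illustrative sketch of composing a chain (not used in this package; the
+// middleware and endpoint names are hypothetical):
+//
+//	mws := Chain(requestLogger, authCheck)
+//	http.ListenAndServe(":3333", mws.HandlerFunc(myEndpoint))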
+
+// ChainHandler is a http.Handler with support for handler composition and
+// execution.
+type ChainHandler struct {
+ Endpoint http.Handler
+ chain http.Handler
+ Middlewares Middlewares
+}
+
+func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ c.chain.ServeHTTP(w, r)
+}
+
+// chain builds a http.Handler composed of an inline middleware stack and endpoint
+// handler in the order they are passed.
+func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
+ // Return ahead of time if there aren't any middlewares for the chain
+ if len(middlewares) == 0 {
+ return endpoint
+ }
+
+ // Wrap the end handler with the middleware chain
+ h := middlewares[len(middlewares)-1](endpoint)
+ for i := len(middlewares) - 2; i >= 0; i-- {
+ h = middlewares[i](h)
+ }
+
+ return h
+}
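+
+// Illustrative usage (a minimal sketch, not part of the upstream file), from a
+// caller's perspective importing chi and chi/middleware:
+//
+//	handler := chi.Chain(middleware.Logger, middleware.Recoverer).
+//		Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			w.Write([]byte("root."))
+//		}))
+//	http.ListenAndServe(":3333", handler)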
diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go
new file mode 100644
index 0000000..a1691bb
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/chi.go
@@ -0,0 +1,134 @@
+// Package chi is a small, idiomatic and composable router for building HTTP services.
+//
+// chi requires Go 1.14 or newer.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/go-chi/chi/v5"
+// "github.com/go-chi/chi/v5/middleware"
+// )
+//
+// func main() {
+// r := chi.NewRouter()
+// r.Use(middleware.Logger)
+// r.Use(middleware.Recoverer)
+//
+// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+// w.Write([]byte("root."))
+// })
+//
+// http.ListenAndServe(":3333", r)
+// }
+//
+// See github.com/go-chi/chi/_examples/ for more in-depth examples.
+//
+// URL patterns allow for easy matching of path components in HTTP
+// requests. The matching components can then be accessed using
+// chi.URLParam(). All patterns must begin with a slash.
+//
+// A simple named placeholder {name} matches any sequence of characters
+// up to the next / or the end of the URL. Trailing slashes on paths must
+// be handled explicitly.
+//
+// A placeholder with a name followed by a colon allows a regular
+// expression match, for example {number:\\d+}. The regular expression
+// syntax is Go's normal regexp RE2 syntax, except that regular expressions
+// including { or } are not supported, and / will never be
+// matched. An anonymous regexp pattern is allowed, using an empty string
+// before the colon in the placeholder, such as {:\\d+}
+//
+// The special placeholder of asterisk matches the rest of the requested
+// URL. Any trailing characters in the pattern are ignored. This is the only
+// placeholder which will match / characters.
+//
+// Examples:
+//
+// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
+// "/user/{name}/info" matches "/user/jsmith/info"
+// "/page/*" matches "/page/intro/latest"
+// "/page/{other}/index" also matches "/page/intro/latest"
+// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
+package chi
+
+import "net/http"
+
+// NewRouter returns a new Mux object that implements the Router interface.
+func NewRouter() *Mux {
+ return NewMux()
+}
+
+// Router defines the core routing methods used by chi's Mux,
+// using only the standard net/http.
+type Router interface {
+ http.Handler
+ Routes
+
+ // Use appends one or more middlewares onto the Router stack.
+ Use(middlewares ...func(http.Handler) http.Handler)
+
+ // With adds inline middlewares for an endpoint handler.
+ With(middlewares ...func(http.Handler) http.Handler) Router
+
+ // Group adds a new inline-Router along the current routing
+ // path, with a fresh middleware stack for the inline-Router.
+ Group(fn func(r Router)) Router
+
+	// Route mounts a sub-Router along a `pattern` string.
+ Route(pattern string, fn func(r Router)) Router
+
+ // Mount attaches another http.Handler along ./pattern/*
+ Mount(pattern string, h http.Handler)
+
+	// Handle and HandleFunc add routes for `pattern` that match
+	// all HTTP methods.
+ Handle(pattern string, h http.Handler)
+ HandleFunc(pattern string, h http.HandlerFunc)
+
+	// Method and MethodFunc add routes for `pattern` that match
+	// the `method` HTTP method.
+ Method(method, pattern string, h http.Handler)
+ MethodFunc(method, pattern string, h http.HandlerFunc)
+
+ // HTTP-method routing along `pattern`
+ Connect(pattern string, h http.HandlerFunc)
+ Delete(pattern string, h http.HandlerFunc)
+ Get(pattern string, h http.HandlerFunc)
+ Head(pattern string, h http.HandlerFunc)
+ Options(pattern string, h http.HandlerFunc)
+ Patch(pattern string, h http.HandlerFunc)
+ Post(pattern string, h http.HandlerFunc)
+ Put(pattern string, h http.HandlerFunc)
+ Trace(pattern string, h http.HandlerFunc)
+
+ // NotFound defines a handler to respond whenever a route could
+ // not be found.
+ NotFound(h http.HandlerFunc)
+
+ // MethodNotAllowed defines a handler to respond whenever a method is
+ // not allowed.
+ MethodNotAllowed(h http.HandlerFunc)
+}
+
+// Routes interface adds methods for router traversal, which are also
+// used by the `docgen` subpackage to generate documentation for Routers.
+type Routes interface {
+ // Routes returns the routing tree in an easily traversable structure.
+ Routes() []Route
+
+ // Middlewares returns the list of middlewares in use by the router.
+ Middlewares() Middlewares
+
+ // Match searches the routing tree for a handler that matches
+ // the method/path - similar to routing a http request, but without
+ // executing the handler thereafter.
+ Match(rctx *Context, method, path string) bool
+}
+
+// Middlewares type is a slice of standard middleware handlers with methods
+// to compose middleware chains and http.Handler's.
+type Middlewares []func(http.Handler) http.Handler
diff --git a/vendor/github.com/go-chi/chi/v5/context.go b/vendor/github.com/go-chi/chi/v5/context.go
new file mode 100644
index 0000000..e78a238
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/context.go
@@ -0,0 +1,159 @@
+package chi
+
+import (
+ "context"
+ "net/http"
+ "strings"
+)
+
+// URLParam returns the url parameter from a http.Request object.
+func URLParam(r *http.Request, key string) string {
+ if rctx := RouteContext(r.Context()); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
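+
+// For example (illustrative only), with a route registered as "/users/{userID}",
+// a request to "/users/123" makes URLParam(r, "userID") return "123".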
+
+// URLParamFromCtx returns the url parameter from a http.Request Context.
+func URLParamFromCtx(ctx context.Context, key string) string {
+ if rctx := RouteContext(ctx); rctx != nil {
+ return rctx.URLParam(key)
+ }
+ return ""
+}
+
+// RouteContext returns chi's routing Context object from a
+// http.Request Context.
+func RouteContext(ctx context.Context) *Context {
+ val, _ := ctx.Value(RouteCtxKey).(*Context)
+ return val
+}
+
+// NewRouteContext returns a new routing Context object.
+func NewRouteContext() *Context {
+ return &Context{}
+}
+
+var (
+ // RouteCtxKey is the context.Context key to store the request context.
+ RouteCtxKey = &contextKey{"RouteContext"}
+)
+
+// Context is the default routing context set on the root node of a
+// request context to track route patterns, URL parameters and
+// an optional routing path.
+type Context struct {
+ Routes Routes
+
+ // parentCtx is the parent of this one, for using Context as a
+ // context.Context directly. This is an optimization that saves
+ // 1 allocation.
+ parentCtx context.Context
+
+ // Routing path/method override used during the route search.
+ // See Mux#routeHTTP method.
+ RoutePath string
+ RouteMethod string
+
+ // URLParams are the stack of routeParams captured during the
+ // routing lifecycle across a stack of sub-routers.
+ URLParams RouteParams
+
+	// Route parameters matched for the current sub-router. It is
+	// intentionally unexported so it can't be tampered with.
+ routeParams RouteParams
+
+ // The endpoint routing pattern that matched the request URI path
+ // or `RoutePath` of the current sub-router. This value will update
+ // during the lifecycle of a request passing through a stack of
+ // sub-routers.
+ routePattern string
+
+ // Routing pattern stack throughout the lifecycle of the request,
+ // across all connected routers. It is a record of all matching
+ // patterns across a stack of sub-routers.
+ RoutePatterns []string
+
+ // methodNotAllowed hint
+ methodNotAllowed bool
+}
+
+// Reset a routing context to its initial state.
+func (x *Context) Reset() {
+ x.Routes = nil
+ x.RoutePath = ""
+ x.RouteMethod = ""
+ x.RoutePatterns = x.RoutePatterns[:0]
+ x.URLParams.Keys = x.URLParams.Keys[:0]
+ x.URLParams.Values = x.URLParams.Values[:0]
+
+ x.routePattern = ""
+ x.routeParams.Keys = x.routeParams.Keys[:0]
+ x.routeParams.Values = x.routeParams.Values[:0]
+ x.methodNotAllowed = false
+ x.parentCtx = nil
+}
+
+// URLParam returns the corresponding URL parameter value from the request
+// routing context.
+func (x *Context) URLParam(key string) string {
+ for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
+ if x.URLParams.Keys[k] == key {
+ return x.URLParams.Values[k]
+ }
+ }
+ return ""
+}
+
+// RoutePattern builds the routing pattern string for the particular
+// request, at the particular point during routing. This means the value
+// will change throughout the execution of a request in a router. That is
+// why it's advised to only use this value after calling the next handler.
+//
+// For example,
+//
+// func Instrument(next http.Handler) http.Handler {
+// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// next.ServeHTTP(w, r)
+// routePattern := chi.RouteContext(r.Context()).RoutePattern()
+// measure(w, r, routePattern)
+// })
+// }
+func (x *Context) RoutePattern() string {
+ routePattern := strings.Join(x.RoutePatterns, "")
+ routePattern = replaceWildcards(routePattern)
+ routePattern = strings.TrimSuffix(routePattern, "//")
+ routePattern = strings.TrimSuffix(routePattern, "/")
+ return routePattern
+}
+
+// replaceWildcards takes a route pattern and recursively replaces all
+// occurrences of "/*/" to "/".
+func replaceWildcards(p string) string {
+ if strings.Contains(p, "/*/") {
+ return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
+ }
+ return p
+}
+
+// RouteParams is a structure to track URL routing parameters efficiently.
+type RouteParams struct {
+ Keys, Values []string
+}
+
+// Add will append a URL parameter to the end of the route params.
+func (s *RouteParams) Add(key, value string) {
+ s.Keys = append(s.Keys, key)
+ s.Values = append(s.Values, value)
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/v5/go.mod b/vendor/github.com/go-chi/chi/v5/go.mod
new file mode 100644
index 0000000..c084c1d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-chi/chi/v5
+
+go 1.14
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/basic_auth.go b/vendor/github.com/go-chi/chi/v5/middleware/basic_auth.go
new file mode 100644
index 0000000..a546c9e
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/basic_auth.go
@@ -0,0 +1,33 @@
+package middleware
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "net/http"
+)
+
+// BasicAuth implements a simple middleware handler for adding basic http auth to a route.
+func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ user, pass, ok := r.BasicAuth()
+ if !ok {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ credPass, credUserOk := creds[user]
+ if !credUserOk || subtle.ConstantTimeCompare([]byte(pass), []byte(credPass)) != 1 {
+ basicAuthFailed(w, realm)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
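+
+// Illustrative usage (a minimal sketch; the realm and credentials are examples only):
+//
+//	r := chi.NewRouter()
+//	r.Use(middleware.BasicAuth("example realm", map[string]string{"admin": "s3cret"}))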
+
+func basicAuthFailed(w http.ResponseWriter, realm string) {
+ w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
+ w.WriteHeader(http.StatusUnauthorized)
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/clean_path.go b/vendor/github.com/go-chi/chi/v5/middleware/clean_path.go
new file mode 100644
index 0000000..adeba42
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/clean_path.go
@@ -0,0 +1,28 @@
+package middleware
+
+import (
+ "net/http"
+ "path"
+
+ "github.com/go-chi/chi/v5"
+)
+
+// CleanPath middleware will clean out double-slash mistakes from a user's request path.
+// For example, if a user requests /users//1 or //users////1, both will be treated as /users/1.
+func CleanPath(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rctx := chi.RouteContext(r.Context())
+
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ rctx.RoutePath = path.Clean(routePath)
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/compress.go b/vendor/github.com/go-chi/chi/v5/middleware/compress.go
new file mode 100644
index 0000000..52f4e0b
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/compress.go
@@ -0,0 +1,399 @@
+package middleware
+
+import (
+ "bufio"
+ "compress/flate"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var defaultCompressibleContentTypes = []string{
+ "text/html",
+ "text/css",
+ "text/plain",
+ "text/javascript",
+ "application/javascript",
+ "application/x-javascript",
+ "application/json",
+ "application/atom+xml",
+ "application/rss+xml",
+ "image/svg+xml",
+}
+
+// Compress is a middleware that compresses the response
+// body of the given content types to a data format based
+// on the Accept-Encoding request header. It uses the given
+// compression level.
+//
+// NOTE: make sure to set the Content-Type header on your response,
+// otherwise this middleware will not compress the response body. For example, in
+// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
+// or set it manually.
+//
+// Passing a compression level of 5 is a sensible value.
+func Compress(level int, types ...string) func(next http.Handler) http.Handler {
+ compressor := NewCompressor(level, types...)
+ return compressor.Handler
+}
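+
+// Illustrative usage (a minimal sketch; the content types are examples only):
+//
+//	r := chi.NewRouter()
+//	r.Use(middleware.Compress(5, "text/html", "application/json"))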
+
+// Compressor represents a set of encoding configurations.
+type Compressor struct {
+ // The mapping of encoder names to encoder functions.
+ encoders map[string]EncoderFunc
+ // The mapping of pooled encoders to pools.
+ pooledEncoders map[string]*sync.Pool
+ // The set of content types allowed to be compressed.
+ allowedTypes map[string]struct{}
+ allowedWildcards map[string]struct{}
+ // The list of encoders in order of decreasing precedence.
+ encodingPrecedence []string
+ level int // The compression level.
+}
+
+// NewCompressor creates a new Compressor that will handle encoding responses.
+//
+// The level should be one of the ones defined in the flate package.
+// The types are the content types that are allowed to be compressed.
+func NewCompressor(level int, types ...string) *Compressor {
+ // If types are provided, set those as the allowed types. If none are
+ // provided, use the default list.
+ allowedTypes := make(map[string]struct{})
+ allowedWildcards := make(map[string]struct{})
+ if len(types) > 0 {
+ for _, t := range types {
+ if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
+ panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
+ }
+ if strings.HasSuffix(t, "/*") {
+ allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
+ } else {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+ } else {
+ for _, t := range defaultCompressibleContentTypes {
+ allowedTypes[t] = struct{}{}
+ }
+ }
+
+ c := &Compressor{
+ level: level,
+ encoders: make(map[string]EncoderFunc),
+ pooledEncoders: make(map[string]*sync.Pool),
+ allowedTypes: allowedTypes,
+ allowedWildcards: allowedWildcards,
+ }
+
+ // Set the default encoders. The precedence order uses the reverse
+ // ordering that the encoders were added. This means adding new encoders
+ // will move them to the front of the order.
+ //
+ // TODO:
+ // lzma: Opera.
+ // sdch: Chrome, Android. Gzip output + dictionary header.
+ // br: Brotli, see https://github.com/go-chi/chi/pull/326
+
+ // HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
+ // wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
+ // checksum compared to CRC-32 used in "gzip" and thus is faster.
+ //
+ // But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
+ // raw DEFLATE data only, without the mentioned zlib wrapper.
+ // Because of this major confusion, most modern browsers try it
+ // both ways, first looking for zlib headers.
+ // Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
+ //
+ // The list of browsers having problems is quite big, see:
+ // http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ // https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
+ //
+ // That's why we prefer gzip over deflate. It's just more reliable
+ // and not significantly slower than deflate.
+ c.SetEncoder("deflate", encoderDeflate)
+
+ // TODO: Exception for old MSIE browsers that can't handle non-HTML?
+ // https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
+ c.SetEncoder("gzip", encoderGzip)
+
+ // NOTE: Not implemented, intentionally:
+ // case "compress": // LZW. Deprecated.
+ // case "bzip2": // Too slow on-the-fly.
+ // case "zopfli": // Too slow on-the-fly.
+ // case "xz": // Too slow on-the-fly.
+ return c
+}
+
+// SetEncoder can be used to set the implementation of a compression algorithm.
+//
+// The encoding should be a standardised identifier. See:
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
+//
+// For example, add the Brotli algorithm:
+//
+// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
+//
+// compressor := middleware.NewCompressor(5, "text/html")
+// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
+// params := brotli_enc.NewBrotliParams()
+// params.SetQuality(level)
+// return brotli_enc.NewBrotliWriter(params, w)
+// })
+func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
+ encoding = strings.ToLower(encoding)
+ if encoding == "" {
+ panic("the encoding can not be empty")
+ }
+ if fn == nil {
+ panic("attempted to set a nil encoder function")
+ }
+
+ // If we are adding a new encoder that is already registered, we have to
+ // clear that one out first.
+ if _, ok := c.pooledEncoders[encoding]; ok {
+ delete(c.pooledEncoders, encoding)
+ }
+ if _, ok := c.encoders[encoding]; ok {
+ delete(c.encoders, encoding)
+ }
+
+	// If the encoder supports resetting (ioResetterWriter), then it can be pooled.
+ encoder := fn(ioutil.Discard, c.level)
+ if encoder != nil {
+ if _, ok := encoder.(ioResetterWriter); ok {
+ pool := &sync.Pool{
+ New: func() interface{} {
+ return fn(ioutil.Discard, c.level)
+ },
+ }
+ c.pooledEncoders[encoding] = pool
+ }
+ }
+ // If the encoder is not in the pooledEncoders, add it to the normal encoders.
+ if _, ok := c.pooledEncoders[encoding]; !ok {
+ c.encoders[encoding] = fn
+ }
+
+ for i, v := range c.encodingPrecedence {
+ if v == encoding {
+ c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
+ }
+ }
+
+ c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
+}
+
+// Handler returns a new middleware that will compress the response based on the
+// current Compressor.
+func (c *Compressor) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ encoder, encoding, cleanup := c.selectEncoder(r.Header, w)
+
+ cw := &compressResponseWriter{
+ ResponseWriter: w,
+ w: w,
+ contentTypes: c.allowedTypes,
+ contentWildcards: c.allowedWildcards,
+ encoding: encoding,
+ compressable: false, // determined in post-handler
+ }
+ if encoder != nil {
+ cw.w = encoder
+ }
+ // Re-add the encoder to the pool if applicable.
+ defer cleanup()
+ defer cw.Close()
+
+ next.ServeHTTP(cw, r)
+ })
+}
+
+// selectEncoder returns the encoder, the name of the encoder, and a closer function.
+func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
+ header := h.Get("Accept-Encoding")
+
+ // Parse the names of all accepted algorithms from the header.
+ accepted := strings.Split(strings.ToLower(header), ",")
+
+ // Find supported encoder by accepted list by precedence
+ for _, name := range c.encodingPrecedence {
+ if matchAcceptEncoding(accepted, name) {
+ if pool, ok := c.pooledEncoders[name]; ok {
+ encoder := pool.Get().(ioResetterWriter)
+ cleanup := func() {
+ pool.Put(encoder)
+ }
+ encoder.Reset(w)
+ return encoder, name, cleanup
+
+ }
+ if fn, ok := c.encoders[name]; ok {
+ return fn(w, c.level), name, func() {}
+ }
+ }
+
+ }
+
+ // No encoder found to match the accepted encoding
+ return nil, "", func() {}
+}
+
+func matchAcceptEncoding(accepted []string, encoding string) bool {
+ for _, v := range accepted {
+ if strings.Contains(v, encoding) {
+ return true
+ }
+ }
+ return false
+}
+
+// An EncoderFunc is a function that wraps the provided io.Writer with a
+// streaming compression algorithm and returns it.
+//
+// In case of failure, the function should return nil.
+type EncoderFunc func(w io.Writer, level int) io.Writer
+
+// Interface for types that allow resetting io.Writers.
+type ioResetterWriter interface {
+ io.Writer
+ Reset(w io.Writer)
+}
+
+type compressResponseWriter struct {
+ http.ResponseWriter
+
+ // The streaming encoder writer to be used if there is one. Otherwise,
+ // this is just the normal writer.
+ w io.Writer
+ contentTypes map[string]struct{}
+ contentWildcards map[string]struct{}
+ encoding string
+ wroteHeader bool
+ compressable bool
+}
+
+func (cw *compressResponseWriter) isCompressable() bool {
+ // Parse the first part of the Content-Type response header.
+ contentType := cw.Header().Get("Content-Type")
+ if idx := strings.Index(contentType, ";"); idx >= 0 {
+ contentType = contentType[0:idx]
+ }
+
+ // Is the content type compressable?
+ if _, ok := cw.contentTypes[contentType]; ok {
+ return true
+ }
+ if idx := strings.Index(contentType, "/"); idx > 0 {
+ contentType = contentType[0:idx]
+ _, ok := cw.contentWildcards[contentType]
+ return ok
+ }
+ return false
+}
+
+func (cw *compressResponseWriter) WriteHeader(code int) {
+ if cw.wroteHeader {
+ cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
+ return
+ }
+ cw.wroteHeader = true
+ defer cw.ResponseWriter.WriteHeader(code)
+
+ // Already compressed data?
+ if cw.Header().Get("Content-Encoding") != "" {
+ return
+ }
+
+ if !cw.isCompressable() {
+ cw.compressable = false
+ return
+ }
+
+ if cw.encoding != "" {
+ cw.compressable = true
+ cw.Header().Set("Content-Encoding", cw.encoding)
+ cw.Header().Add("Vary", "Accept-Encoding")
+
+ // The content-length after compression is unknown
+ cw.Header().Del("Content-Length")
+ }
+}
+
+func (cw *compressResponseWriter) Write(p []byte) (int, error) {
+ if !cw.wroteHeader {
+ cw.WriteHeader(http.StatusOK)
+ }
+
+ return cw.writer().Write(p)
+}
+
+func (cw *compressResponseWriter) writer() io.Writer {
+ if cw.compressable {
+ return cw.w
+ } else {
+ return cw.ResponseWriter
+ }
+}
+
+type compressFlusher interface {
+ Flush() error
+}
+
+func (cw *compressResponseWriter) Flush() {
+ if f, ok := cw.writer().(http.Flusher); ok {
+ f.Flush()
+ }
+ // If the underlying writer has a compression flush signature,
+ // call this Flush() method instead
+ if f, ok := cw.writer().(compressFlusher); ok {
+ f.Flush()
+
+ // Also flush the underlying response writer
+ if f, ok := cw.ResponseWriter.(http.Flusher); ok {
+ f.Flush()
+ }
+ }
+}
+
+func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if hj, ok := cw.writer().(http.Hijacker); ok {
+ return hj.Hijack()
+ }
+ return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
+ if ps, ok := cw.writer().(http.Pusher); ok {
+ return ps.Push(target, opts)
+ }
+ return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
+}
+
+func (cw *compressResponseWriter) Close() error {
+ if c, ok := cw.writer().(io.WriteCloser); ok {
+ return c.Close()
+ }
+ return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
+}
+
+func encoderGzip(w io.Writer, level int) io.Writer {
+ gw, err := gzip.NewWriterLevel(w, level)
+ if err != nil {
+ return nil
+ }
+ return gw
+}
+
+func encoderDeflate(w io.Writer, level int) io.Writer {
+ dw, err := flate.NewWriter(w, level)
+ if err != nil {
+ return nil
+ }
+ return dw
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/content_charset.go b/vendor/github.com/go-chi/chi/v5/middleware/content_charset.go
new file mode 100644
index 0000000..07b5ce6
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/content_charset.go
@@ -0,0 +1,51 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
+// An empty charset will allow requests with no Content-Type header or no specified charset.
+func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
+ for i, c := range charsets {
+ charsets[i] = strings.ToLower(c)
+ }
+
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ })
+ }
+}
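+
+// Illustrative usage (a minimal sketch): allow utf-8 or an unspecified charset.
+//
+//	r.Use(middleware.ContentCharset("utf-8", ""))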
+
+// Check the content encoding against a list of acceptable values.
+func contentEncoding(ce string, charsets ...string) bool {
+ _, ce = split(strings.ToLower(ce), ";")
+ _, ce = split(ce, "charset=")
+ ce, _ = split(ce, ";")
+ for _, c := range charsets {
+ if ce == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Split a string in two parts, cleaning any whitespace.
+func split(str, sep string) (string, string) {
+ var a, b string
+ var parts = strings.SplitN(str, sep, 2)
+ a = strings.TrimSpace(parts[0])
+ if len(parts) == 2 {
+ b = strings.TrimSpace(parts[1])
+ }
+
+ return a, b
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/content_encoding.go b/vendor/github.com/go-chi/chi/v5/middleware/content_encoding.go
new file mode 100644
index 0000000..e0b9ccc
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/content_encoding.go
@@ -0,0 +1,34 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// AllowContentEncoding enforces a whitelist of request Content-Encoding values; otherwise it responds
+// with a 415 Unsupported Media Type status.
+func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler {
+ allowedEncodings := make(map[string]struct{}, len(contentEncoding))
+ for _, encoding := range contentEncoding {
+ allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{}
+ }
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ requestEncodings := r.Header["Content-Encoding"]
+ // skip check for empty content body or no Content-Encoding
+ if r.ContentLength == 0 {
+ next.ServeHTTP(w, r)
+ return
+ }
+ // All encodings in the request must be allowed
+ for _, encoding := range requestEncodings {
+ if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok {
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ return
+ }
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
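+
+// Illustrative usage (a minimal sketch): only accept gzip- or deflate-encoded request bodies.
+//
+//	r.Use(middleware.AllowContentEncoding("gzip", "deflate"))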
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/content_type.go b/vendor/github.com/go-chi/chi/v5/middleware/content_type.go
new file mode 100644
index 0000000..023978f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/content_type.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SetHeader is a convenience handler to set a response header key/value
+func SetHeader(key, value string) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(key, value)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
+
+// AllowContentType enforces a whitelist of request Content-Types; otherwise it responds
+// with a 415 Unsupported Media Type status.
+func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
+ allowedContentTypes := make(map[string]struct{}, len(contentTypes))
+ for _, ctype := range contentTypes {
+ allowedContentTypes[strings.TrimSpace(strings.ToLower(ctype))] = struct{}{}
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if r.ContentLength == 0 {
+ // skip check for empty content body
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
+ if i := strings.Index(s, ";"); i > -1 {
+ s = s[0:i]
+ }
+
+ if _, ok := allowedContentTypes[s]; ok {
+ next.ServeHTTP(w, r)
+ return
+ }
+
+ w.WriteHeader(http.StatusUnsupportedMediaType)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
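+
+// Illustrative usage (a minimal sketch): only accept JSON request bodies.
+//
+//	r.Use(middleware.AllowContentType("application/json"))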
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/get_head.go b/vendor/github.com/go-chi/chi/v5/middleware/get_head.go
new file mode 100644
index 0000000..d4606d8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/get_head.go
@@ -0,0 +1,39 @@
+package middleware
+
+import (
+ "net/http"
+
+ "github.com/go-chi/chi/v5"
+)
+
+// GetHead automatically routes undefined HEAD requests to GET handlers.
+func GetHead(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "HEAD" {
+ rctx := chi.RouteContext(r.Context())
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ }
+
+ // Temporary routing context to look-ahead before routing the request
+ tctx := chi.NewRouteContext()
+
+		// Attempt to find a HEAD handler for the routing path; if not found, traverse
+		// the router as though it's a GET route, but proceed with the request
+		// using the HEAD method.
+ if !rctx.Routes.Match(tctx, "HEAD", routePath) {
+ rctx.RouteMethod = "GET"
+ rctx.RoutePath = routePath
+ next.ServeHTTP(w, r)
+ return
+ }
+ }
+
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/heartbeat.go b/vendor/github.com/go-chi/chi/v5/middleware/heartbeat.go
new file mode 100644
index 0000000..f36e8cc
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/heartbeat.go
@@ -0,0 +1,26 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Heartbeat is an endpoint middleware useful for setting up a path like
+// `/ping` that load balancers or external uptime-testing services
+// can request before hitting any routes. It's also convenient
+// to place this above ACL middlewares.
+func Heartbeat(endpoint string) func(http.Handler) http.Handler {
+ f := func(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if (r.Method == "GET" || r.Method == "HEAD") && strings.EqualFold(r.URL.Path, endpoint) {
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte("."))
+ return
+ }
+ h.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+ return f
+}
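+
+// Illustrative usage (a minimal sketch; the "/ping" path is an example):
+//
+//	r.Use(middleware.Heartbeat("/ping"))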
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/logger.go b/vendor/github.com/go-chi/chi/v5/middleware/logger.go
new file mode 100644
index 0000000..98250d8
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/logger.go
@@ -0,0 +1,171 @@
+package middleware
+
+import (
+ "bytes"
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "runtime"
+ "time"
+)
+
+var (
+ // LogEntryCtxKey is the context.Context key to store the request log entry.
+ LogEntryCtxKey = &contextKey{"LogEntry"}
+
+ // DefaultLogger is called by the Logger middleware handler to log each request.
+	// It's made a package-level variable so that it can be reconfigured for custom
+ // logging configurations.
+ DefaultLogger func(next http.Handler) http.Handler
+)
+
+// Logger is a middleware that logs the start and end of each request, along
+// with some useful data about what was requested, what the response status was,
+// and how long it took to return. When standard output is a TTY, Logger will
+// print in color, otherwise it will print in black and white. Logger prints a
+// request ID if one is provided.
+//
+// Alternatively, look at https://github.com/goware/httplog for a more in-depth
+// http logger with structured logging support.
+//
+// IMPORTANT NOTE: Logger should go before any other middleware that may change
+// the response, such as middleware.Recoverer. Example:
+// r := chi.NewRouter()
+// r.Use(middleware.Logger) // <--<< Logger should come before Recoverer
+// r.Use(middleware.Recoverer)
+// r.Get("/", handler)
+func Logger(next http.Handler) http.Handler {
+ return DefaultLogger(next)
+}
+
+// RequestLogger returns a logger handler using a custom LogFormatter.
+func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ entry := f.NewLogEntry(r)
+ ww := NewWrapResponseWriter(w, r.ProtoMajor)
+
+ t1 := time.Now()
+ defer func() {
+ entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
+ }()
+
+ next.ServeHTTP(ww, WithLogEntry(r, entry))
+ }
+ return http.HandlerFunc(fn)
+ }
+}
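+
+// Illustrative usage (a minimal sketch) with the bundled DefaultLogFormatter:
+//
+//	logger := log.New(os.Stdout, "", log.LstdFlags)
+//	r.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger, NoColor: true}))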
+
+// LogFormatter initiates the beginning of a new LogEntry per request.
+// See DefaultLogFormatter for an example implementation.
+type LogFormatter interface {
+ NewLogEntry(r *http.Request) LogEntry
+}
+
+// LogEntry records the final log when a request completes.
+// See defaultLogEntry for an example implementation.
+type LogEntry interface {
+ Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
+ Panic(v interface{}, stack []byte)
+}
+
+// GetLogEntry returns the in-context LogEntry for a request.
+func GetLogEntry(r *http.Request) LogEntry {
+ entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
+ return entry
+}
+
+// WithLogEntry sets the in-context LogEntry for a request.
+func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
+ r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
+ return r
+}
+
+// LoggerInterface accepts printing to stdlib logger or compatible logger.
+type LoggerInterface interface {
+ Print(v ...interface{})
+}
+
+// DefaultLogFormatter is a simple logger that implements a LogFormatter.
+type DefaultLogFormatter struct {
+ Logger LoggerInterface
+ NoColor bool
+}
+
+// NewLogEntry creates a new LogEntry for the request.
+func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
+ useColor := !l.NoColor
+ entry := &defaultLogEntry{
+ DefaultLogFormatter: l,
+ request: r,
+ buf: &bytes.Buffer{},
+ useColor: useColor,
+ }
+
+ reqID := GetReqID(r.Context())
+ if reqID != "" {
+ cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
+ }
+ cW(entry.buf, useColor, nCyan, "\"")
+ cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
+
+ scheme := "http"
+ if r.TLS != nil {
+ scheme = "https"
+ }
+ cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
+
+ entry.buf.WriteString("from ")
+ entry.buf.WriteString(r.RemoteAddr)
+ entry.buf.WriteString(" - ")
+
+ return entry
+}
+
+type defaultLogEntry struct {
+ *DefaultLogFormatter
+ request *http.Request
+ buf *bytes.Buffer
+ useColor bool
+}
+
+func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
+ switch {
+ case status < 200:
+ cW(l.buf, l.useColor, bBlue, "%03d", status)
+ case status < 300:
+ cW(l.buf, l.useColor, bGreen, "%03d", status)
+ case status < 400:
+ cW(l.buf, l.useColor, bCyan, "%03d", status)
+ case status < 500:
+ cW(l.buf, l.useColor, bYellow, "%03d", status)
+ default:
+ cW(l.buf, l.useColor, bRed, "%03d", status)
+ }
+
+ cW(l.buf, l.useColor, bBlue, " %dB", bytes)
+
+ l.buf.WriteString(" in ")
+ if elapsed < 500*time.Millisecond {
+ cW(l.buf, l.useColor, nGreen, "%s", elapsed)
+ } else if elapsed < 5*time.Second {
+ cW(l.buf, l.useColor, nYellow, "%s", elapsed)
+ } else {
+ cW(l.buf, l.useColor, nRed, "%s", elapsed)
+ }
+
+ l.Logger.Print(l.buf.String())
+}
+
+func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
+ PrintPrettyStack(v)
+}
+
+func init() {
+ color := true
+ if runtime.GOOS == "windows" {
+ color = false
+ }
+ DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: !color})
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/maybe.go b/vendor/github.com/go-chi/chi/v5/middleware/maybe.go
new file mode 100644
index 0000000..d8ca63b
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/maybe.go
@@ -0,0 +1,18 @@
+package middleware
+
+import "net/http"
+
+// Maybe middleware will allow you to change the flow of the middleware stack execution depending on the return
+// value of maybeFn(request). This is useful, for example, if you'd like to skip a middleware handler when
+// a request does not satisfy the maybeFn logic.
+func Maybe(mw func(http.Handler) http.Handler, maybeFn func(r *http.Request) bool) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if maybeFn(r) {
+ mw(next).ServeHTTP(w, r)
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ })
+ }
+}
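+
+// Illustrative usage (a minimal sketch; the "/healthz" path is hypothetical):
+// skip request logging for health-check requests.
+//
+//	r.Use(middleware.Maybe(middleware.Logger, func(r *http.Request) bool {
+//		return r.URL.Path != "/healthz"
+//	}))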
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/middleware.go b/vendor/github.com/go-chi/chi/v5/middleware/middleware.go
new file mode 100644
index 0000000..cc371e0
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/middleware.go
@@ -0,0 +1,23 @@
+package middleware
+
+import "net/http"
+
+// New will create a new middleware handler from a http.Handler.
+func New(h http.Handler) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ h.ServeHTTP(w, r)
+ })
+ }
+}
+
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation. This technique
+// for defining context keys was copied from Go 1.7's new use of context in net/http.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string {
+ return "chi/middleware context value " + k.name
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/nocache.go b/vendor/github.com/go-chi/chi/v5/middleware/nocache.go
new file mode 100644
index 0000000..2412829
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/nocache.go
@@ -0,0 +1,58 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net/http"
+ "time"
+)
+
+// Unix epoch time
+var epoch = time.Unix(0, 0).Format(time.RFC1123)
+
+// Taken from https://github.com/mytrile/nocache
+var noCacheHeaders = map[string]string{
+ "Expires": epoch,
+ "Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
+ "Pragma": "no-cache",
+ "X-Accel-Expires": "0",
+}
+
+var etagHeaders = []string{
+ "ETag",
+ "If-Modified-Since",
+ "If-Match",
+ "If-None-Match",
+ "If-Range",
+ "If-Unmodified-Since",
+}
+
+// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
+// a router (or subrouter) from being cached by an upstream proxy and/or client.
+//
+// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
+// Expires: Thu, 01 Jan 1970 00:00:00 UTC
+// Cache-Control: no-cache, private, max-age=0
+// X-Accel-Expires: 0
+// Pragma: no-cache (for HTTP/1.0 proxies/clients)
+func NoCache(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+
+ // Delete any ETag headers that may have been set
+ for _, v := range etagHeaders {
+ if r.Header.Get(v) != "" {
+ r.Header.Del(v)
+ }
+ }
+
+ // Set our NoCache headers
+ for k, v := range noCacheHeaders {
+ w.Header().Set(k, v)
+ }
+
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/page_route.go b/vendor/github.com/go-chi/chi/v5/middleware/page_route.go
new file mode 100644
index 0000000..32871b7
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/page_route.go
@@ -0,0 +1,20 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// PageRoute is a simple middleware which allows you to route a static GET request
+// at the middleware stack level.
+func PageRoute(path string, handler http.Handler) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" && strings.EqualFold(r.URL.Path, path) {
+ handler.ServeHTTP(w, r)
+ return
+ }
+ next.ServeHTTP(w, r)
+ })
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/path_rewrite.go b/vendor/github.com/go-chi/chi/v5/middleware/path_rewrite.go
new file mode 100644
index 0000000..99af62c
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/path_rewrite.go
@@ -0,0 +1,16 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// PathRewrite is a simple middleware which allows you to rewrite the request URL path.
+func PathRewrite(old, new string) func(http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ r.URL.Path = strings.Replace(r.URL.Path, old, new, 1)
+ next.ServeHTTP(w, r)
+ })
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/profiler.go b/vendor/github.com/go-chi/chi/v5/middleware/profiler.go
new file mode 100644
index 0000000..3c36f87
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/profiler.go
@@ -0,0 +1,62 @@
+package middleware
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+
+ "github.com/go-chi/chi/v5"
+)
+
+// Profiler is a convenient subrouter used for mounting net/http/pprof, e.g.:
+//
+// func MyService() http.Handler {
+// r := chi.NewRouter()
+// // ..middlewares
+// r.Mount("/debug", middleware.Profiler())
+// // ..routes
+// return r
+// }
+func Profiler() http.Handler {
+ r := chi.NewRouter()
+ r.Use(NoCache)
+
+ r.Get("/", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/pprof/", http.StatusMovedPermanently)
+ })
+ r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, r.RequestURI+"/", http.StatusMovedPermanently)
+ })
+
+ r.HandleFunc("/pprof/*", pprof.Index)
+ r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
+ r.HandleFunc("/pprof/profile", pprof.Profile)
+ r.HandleFunc("/pprof/symbol", pprof.Symbol)
+ r.HandleFunc("/pprof/trace", pprof.Trace)
+ r.HandleFunc("/vars", expVars)
+
+ r.Handle("/pprof/goroutine", pprof.Handler("goroutine"))
+ r.Handle("/pprof/threadcreate", pprof.Handler("threadcreate"))
+ r.Handle("/pprof/mutex", pprof.Handler("mutex"))
+ r.Handle("/pprof/heap", pprof.Handler("heap"))
+ r.Handle("/pprof/block", pprof.Handler("block"))
+ r.Handle("/pprof/allocs", pprof.Handler("allocs"))
+
+ return r
+}
+
+// Replicated from expvar.go as not public.
+func expVars(w http.ResponseWriter, r *http.Request) {
+ first := true
+ w.Header().Set("Content-Type", "application/json")
+ fmt.Fprintf(w, "{\n")
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/realip.go b/vendor/github.com/go-chi/chi/v5/middleware/realip.go
new file mode 100644
index 0000000..2c6b3b3
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/realip.go
@@ -0,0 +1,60 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "net"
+ "net/http"
+ "strings"
+)
+
+var trueClientIP = http.CanonicalHeaderKey("True-Client-IP")
+var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
+var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
+
+// RealIP is a middleware that sets a http.Request's RemoteAddr to the results
+// of parsing either the True-Client-IP, X-Real-IP or the X-Forwarded-For headers
+// (in that order).
+//
+// This middleware should be inserted fairly early in the middleware stack to
+// ensure that subsequent layers (e.g., request loggers) which examine the
+// RemoteAddr will see the intended value.
+//
+// You should only use this middleware if you can trust the headers passed to
+// you (in particular, the three headers this middleware uses), for example
+// because you have placed a reverse proxy like HAProxy or nginx in front of
+// chi. If your reverse proxies are configured to pass along arbitrary header
+// values from the client, or if you use this middleware without a reverse
+// proxy, malicious clients will be able to make you very sad (or, depending on
+// how you're using RemoteAddr, vulnerable to an attack of some sort).
+func RealIP(h http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ if rip := realIP(r); rip != "" {
+ r.RemoteAddr = rip
+ }
+ h.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+func realIP(r *http.Request) string {
+ var ip string
+
+ if tcip := r.Header.Get(trueClientIP); tcip != "" {
+ ip = tcip
+ } else if xrip := r.Header.Get(xRealIP); xrip != "" {
+ ip = xrip
+ } else if xff := r.Header.Get(xForwardedFor); xff != "" {
+ i := strings.Index(xff, ",")
+ if i == -1 {
+ i = len(xff)
+ }
+ ip = xff[:i]
+ }
+ if ip == "" || net.ParseIP(ip) == nil {
+ return ""
+ }
+ return ip
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/recoverer.go b/vendor/github.com/go-chi/chi/v5/middleware/recoverer.go
new file mode 100644
index 0000000..8fa69ba
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/recoverer.go
@@ -0,0 +1,202 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "runtime/debug"
+ "strings"
+)
+
+// Recoverer is a middleware that recovers from panics, logs the panic (and a
+// backtrace), and returns a HTTP 500 (Internal Server Error) status if
+// possible. Recoverer prints a request ID if one is provided.
+//
+// Alternatively, look at https://github.com/go-chi/httplog middleware pkgs.
+func Recoverer(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ defer func() {
+ if rvr := recover(); rvr != nil {
+ if rvr == http.ErrAbortHandler {
+ // we don't recover http.ErrAbortHandler so the response
+ // to the client is aborted, this should not be logged
+ panic(rvr)
+ }
+
+ logEntry := GetLogEntry(r)
+ if logEntry != nil {
+ logEntry.Panic(rvr, debug.Stack())
+ } else {
+ PrintPrettyStack(rvr)
+ }
+
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+ }()
+
+ next.ServeHTTP(w, r)
+ }
+
+ return http.HandlerFunc(fn)
+}
+
+// for ability to test the PrintPrettyStack function
+var recovererErrorWriter io.Writer = os.Stderr
+
+func PrintPrettyStack(rvr interface{}) {
+ debugStack := debug.Stack()
+ s := prettyStack{}
+ out, err := s.parse(debugStack, rvr)
+ if err == nil {
+ recovererErrorWriter.Write(out)
+ } else {
+ // print stdlib output as a fallback
+ os.Stderr.Write(debugStack)
+ }
+}
+
+type prettyStack struct {
+}
+
+func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
+ var err error
+ useColor := true
+ buf := &bytes.Buffer{}
+
+ cW(buf, false, bRed, "\n")
+ cW(buf, useColor, bCyan, " panic: ")
+ cW(buf, useColor, bBlue, "%v", rvr)
+ cW(buf, false, bWhite, "\n \n")
+
+ // process debug stack info
+ stack := strings.Split(string(debugStack), "\n")
+ lines := []string{}
+
+ // locate panic line, as we may have nested panics
+ for i := len(stack) - 1; i > 0; i-- {
+ lines = append(lines, stack[i])
+ if strings.HasPrefix(stack[i], "panic(") {
+ lines = lines[0 : len(lines)-2] // remove boilerplate
+ break
+ }
+ }
+
+ // reverse
+ for i := len(lines)/2 - 1; i >= 0; i-- {
+ opp := len(lines) - 1 - i
+ lines[i], lines[opp] = lines[opp], lines[i]
+ }
+
+ // decorate
+ for i, line := range lines {
+ lines[i], err = s.decorateLine(line, useColor, i)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ for _, l := range lines {
+ fmt.Fprintf(buf, "%s", l)
+ }
+ return buf.Bytes(), nil
+}
+
+func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) {
+ line = strings.TrimSpace(line)
+ if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") {
+ return s.decorateSourceLine(line, useColor, num)
+ } else if strings.HasSuffix(line, ")") {
+ return s.decorateFuncCallLine(line, useColor, num)
+ } else {
+ if strings.HasPrefix(line, "\t") {
+ return strings.Replace(line, "\t", " ", 1), nil
+ } else {
+ return fmt.Sprintf(" %s\n", line), nil
+ }
+ }
+}
+
+func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, "(")
+ if idx < 0 {
+ return "", errors.New("not a func call line")
+ }
+
+ buf := &bytes.Buffer{}
+ pkg := line[0:idx]
+ // addr := line[idx:]
+ method := ""
+
+ if idx := strings.LastIndex(pkg, string(os.PathSeparator)); idx < 0 {
+ if idx := strings.Index(pkg, "."); idx > 0 {
+ method = pkg[idx:]
+ pkg = pkg[0:idx]
+ }
+ } else {
+ method = pkg[idx+1:]
+ pkg = pkg[0 : idx+1]
+ if idx := strings.Index(method, "."); idx > 0 {
+ pkg += method[0:idx]
+ method = method[idx:]
+ }
+ }
+ pkgColor := nYellow
+ methodColor := bGreen
+
+ if num == 0 {
+ cW(buf, useColor, bRed, " -> ")
+ pkgColor = bMagenta
+ methodColor = bRed
+ } else {
+ cW(buf, useColor, bWhite, " ")
+ }
+ cW(buf, useColor, pkgColor, "%s", pkg)
+ cW(buf, useColor, methodColor, "%s\n", method)
+ // cW(buf, useColor, nBlack, "%s", addr)
+ return buf.String(), nil
+}
+
+func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
+ idx := strings.LastIndex(line, ".go:")
+ if idx < 0 {
+ return "", errors.New("not a source line")
+ }
+
+ buf := &bytes.Buffer{}
+ path := line[0 : idx+3]
+ lineno := line[idx+3:]
+
+ idx = strings.LastIndex(path, string(os.PathSeparator))
+ dir := path[0 : idx+1]
+ file := path[idx+1:]
+
+ idx = strings.Index(lineno, " ")
+ if idx > 0 {
+ lineno = lineno[0:idx]
+ }
+ fileColor := bCyan
+ lineColor := bGreen
+
+ if num == 1 {
+ cW(buf, useColor, bRed, " -> ")
+ fileColor = bRed
+ lineColor = bMagenta
+ } else {
+ cW(buf, false, bWhite, " ")
+ }
+ cW(buf, useColor, bWhite, "%s", dir)
+ cW(buf, useColor, fileColor, "%s", file)
+ cW(buf, useColor, lineColor, "%s", lineno)
+ if num == 1 {
+ cW(buf, false, bWhite, "\n")
+ }
+ cW(buf, false, bWhite, "\n")
+
+ return buf.String(), nil
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/request_id.go b/vendor/github.com/go-chi/chi/v5/middleware/request_id.go
new file mode 100644
index 0000000..4903ecc
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/request_id.go
@@ -0,0 +1,96 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "sync/atomic"
+)
+
+// Key to use when setting the request ID.
+type ctxKeyRequestID int
+
+// RequestIDKey is the key that holds the unique request ID in a request context.
+const RequestIDKey ctxKeyRequestID = 0
+
+// RequestIDHeader is the name of the HTTP Header which contains the request id.
+// Exported so that it can be changed by developers
+var RequestIDHeader = "X-Request-Id"
+
+var prefix string
+var reqid uint64
+
+// A quick note on the statistics here: we're trying to calculate the chance that
+// two randomly generated base62 prefixes will collide. We use the formula from
+// http://en.wikipedia.org/wiki/Birthday_problem
+//
+// P[m, n] \approx 1 - e^{-m^2/2n}
+//
+// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
+// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
+//
+// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
+//
+// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
+// our purposes, and is surely more than anyone would ever need in practice -- a
+// process that is rebooted a handful of times a day for a hundred years has less
+// than a millionth of a percent chance of generating two colliding IDs.
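+//
+// Worked arithmetic for the figures above (a sketch for readers checking the math):
+// with m = 315360000 and n(10) = 62^10 ≈ 8.39e17, we get m^2/(2n) ≈ 9.94e16/1.68e18 ≈ 0.059,
+// so P ≈ 1 - e^{-0.059} ≈ 0.0575, i.e. about 5.75%.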
+
+func init() {
+ hostname, err := os.Hostname()
+ if hostname == "" || err != nil {
+ hostname = "localhost"
+ }
+ var buf [12]byte
+ var b64 string
+ for len(b64) < 10 {
+ rand.Read(buf[:])
+ b64 = base64.StdEncoding.EncodeToString(buf[:])
+ b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
+ }
+
+ prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
+}
+
+// RequestID is a middleware that injects a request ID into the context of each
+// request. A request ID is a string of the form "host.example.com/random-0001",
+// where "random" is a base62 random string that uniquely identifies this go
+// process, and where the last number is an atomically incremented request
+// counter.
+func RequestID(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+ requestID := r.Header.Get(RequestIDHeader)
+ if requestID == "" {
+ myid := atomic.AddUint64(&reqid, 1)
+ requestID = fmt.Sprintf("%s-%06d", prefix, myid)
+ }
+ ctx = context.WithValue(ctx, RequestIDKey, requestID)
+ next.ServeHTTP(w, r.WithContext(ctx))
+ }
+ return http.HandlerFunc(fn)
+}
+
+// GetReqID returns a request ID from the given context if one is present.
+// Returns the empty string if a request ID cannot be found.
+func GetReqID(ctx context.Context) string {
+ if ctx == nil {
+ return ""
+ }
+ if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
+ return reqID
+ }
+ return ""
+}
+
+// NextRequestID generates the next request ID in the sequence.
+func NextRequestID() uint64 {
+ return atomic.AddUint64(&reqid, 1)
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go
new file mode 100644
index 0000000..ea914a1
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/route_headers.go
@@ -0,0 +1,160 @@
+package middleware
+
+import (
+ "net/http"
+ "strings"
+)
+
+// RouteHeaders is a neat little header-based router that allows you to direct
+// the flow of a request through a middleware stack based on a request header.
+//
+// For example, let's say you'd like to set up multiple routers depending on the
+// request Host header; you could then do something like so:
+//
+// r := chi.NewRouter()
+// rSubdomain := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Host", "example.com", middleware.New(r)).
+// Route("Host", "*.example.com", middleware.New(rSubdomain)).
+// Handler)
+//
+// r.Get("/", h)
+// rSubdomain.Get("/", h2)
+//
+//
+// As another example, imagine you want to set up multiple CORS handlers, where for
+// your origin servers you allow authorized requests, but for third-party public
+// requests, authorization is disabled.
+//
+// r := chi.NewRouter()
+//
+// r.Use(middleware.RouteHeaders().
+// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"https://api.skyweaver.net"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
+// AllowCredentials: true, // <----------<<< allow credentials
+// })).
+// Route("Origin", "*", cors.Handler(cors.Options{
+// AllowedOrigins: []string{"*"},
+// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
+// AllowedHeaders: []string{"Accept", "Content-Type"},
+// AllowCredentials: false, // <----------<<< do not allow credentials
+// })).
+// Handler)
+//
+func RouteHeaders() HeaderRouter {
+ return HeaderRouter{}
+}
+
+type HeaderRouter map[string][]HeaderRoute
+
+func (hr HeaderRouter) Route(header, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
+ header = strings.ToLower(header)
+ k := hr[header]
+ if k == nil {
+ hr[header] = []HeaderRoute{}
+ }
+ patterns := []Pattern{}
+ for _, m := range match {
+ patterns = append(patterns, NewPattern(m))
+ }
+ hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler})
+ return hr
+}
+
+func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter {
+ hr["*"] = []HeaderRoute{{Middleware: handler}}
+ return hr
+}
+
+func (hr HeaderRouter) Handler(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if len(hr) == 0 {
+			// no routes set; pass the request through untouched
+			next.ServeHTTP(w, r)
+			return
+		}
+
+ // find first matching header route, and continue
+ for header, matchers := range hr {
+ headerValue := r.Header.Get(header)
+ if headerValue == "" {
+ continue
+ }
+ headerValue = strings.ToLower(headerValue)
+ for _, matcher := range matchers {
+ if matcher.IsMatch(headerValue) {
+ matcher.Middleware(next).ServeHTTP(w, r)
+ return
+ }
+ }
+ }
+
+ // if no match, check for "*" default route
+ matcher, ok := hr["*"]
+ if !ok || matcher[0].Middleware == nil {
+ next.ServeHTTP(w, r)
+ return
+ }
+ matcher[0].Middleware(next).ServeHTTP(w, r)
+ })
+}
+
+type HeaderRoute struct {
+ Middleware func(next http.Handler) http.Handler
+ MatchOne Pattern
+ MatchAny []Pattern
+}
+
+func (r HeaderRoute) IsMatch(value string) bool {
+ if len(r.MatchAny) > 0 {
+ for _, m := range r.MatchAny {
+ if m.Match(value) {
+ return true
+ }
+ }
+ } else if r.MatchOne.Match(value) {
+ return true
+ }
+ return false
+}
+
+type Pattern struct {
+ prefix string
+ suffix string
+ wildcard bool
+}
+
+func NewPattern(value string) Pattern {
+ p := Pattern{}
+ if i := strings.IndexByte(value, '*'); i >= 0 {
+ p.wildcard = true
+ p.prefix = value[0:i]
+ p.suffix = value[i+1:]
+ } else {
+ p.prefix = value
+ }
+ return p
+}
+
+func (p Pattern) Match(v string) bool {
+ if !p.wildcard {
+ if p.prefix == v {
+ return true
+ } else {
+ return false
+ }
+ }
+ return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix)
+}
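
As a rough sketch of how the RouteHeaders builder above can be wired up — not part of this change; setTier, the Origin values, and the port are hypothetical, and the chi import paths are assumed from the vendored layout:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

// setTier is a hypothetical middleware factory used only for illustration.
func setTier(tier string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("X-Tier", tier)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	r := chi.NewRouter()

	// Requests from the trusted Origin get one middleware chain, any other
	// Origin gets another; requests without an Origin header fall through
	// untouched because their header value is empty.
	r.Use(middleware.RouteHeaders().
		Route("Origin", "https://app.example.com", setTier("trusted")).
		Route("Origin", "*", setTier("public")).
		Handler)

	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("ok"))
	})

	http.ListenAndServe(":3000", r)
}
```
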
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/strip.go b/vendor/github.com/go-chi/chi/v5/middleware/strip.go
new file mode 100644
index 0000000..ce8ebfc
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/strip.go
@@ -0,0 +1,62 @@
+package middleware
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-chi/chi/v5"
+)
+
+// StripSlashes is a middleware that will match request paths with a trailing
+// slash, strip it from the path, and continue routing through the mux; if a
+// route matches, it will serve the handler.
+func StripSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx != nil && rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ newPath := path[:len(path)-1]
+ if rctx == nil {
+ r.URL.Path = newPath
+ } else {
+ rctx.RoutePath = newPath
+ }
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
+
+// RedirectSlashes is a middleware that will match request paths with a trailing
+// slash and redirect to the same path, less the trailing slash.
+//
+// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
+// see https://github.com/go-chi/chi/issues/343
+func RedirectSlashes(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ var path string
+ rctx := chi.RouteContext(r.Context())
+ if rctx != nil && rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ } else {
+ path = r.URL.Path
+ }
+ if len(path) > 1 && path[len(path)-1] == '/' {
+ if r.URL.RawQuery != "" {
+ path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
+ } else {
+ path = path[:len(path)-1]
+ }
+ redirectURL := fmt.Sprintf("//%s%s", r.Host, path)
+ http.Redirect(w, r, redirectURL, 301)
+ return
+ }
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
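
A small usage sketch of the slash middlewares above, assuming the vendored chi import paths; the route and port are illustrative:

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()
	// StripSlashes lets "GET /articles/" match the "/articles" route below;
	// RedirectSlashes would instead reply with a 301 to "/articles".
	r.Use(middleware.StripSlashes)

	r.Get("/articles", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("article index"))
	})

	http.ListenAndServe(":3000", r)
}
```
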
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/terminal.go b/vendor/github.com/go-chi/chi/v5/middleware/terminal.go
new file mode 100644
index 0000000..5ead7b9
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/terminal.go
@@ -0,0 +1,63 @@
+package middleware
+
+// Ported from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ // Normal colors
+ nBlack = []byte{'\033', '[', '3', '0', 'm'}
+ nRed = []byte{'\033', '[', '3', '1', 'm'}
+ nGreen = []byte{'\033', '[', '3', '2', 'm'}
+ nYellow = []byte{'\033', '[', '3', '3', 'm'}
+ nBlue = []byte{'\033', '[', '3', '4', 'm'}
+ nMagenta = []byte{'\033', '[', '3', '5', 'm'}
+ nCyan = []byte{'\033', '[', '3', '6', 'm'}
+ nWhite = []byte{'\033', '[', '3', '7', 'm'}
+ // Bright colors
+ bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
+ bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
+ bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
+ bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
+ bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
+ bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
+ bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
+ bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
+
+ reset = []byte{'\033', '[', '0', 'm'}
+)
+
+// IsTTY reports whether stdout appears to be a terminal.
+var IsTTY bool
+
+func init() {
+ // This is sort of cheating: if stdout is a character device, we assume
+ // that means it's a TTY. Unfortunately, there are many non-TTY
+ // character devices, but fortunately stdout is rarely set to any of
+ // them.
+ //
+ // We could solve this properly by pulling in a dependency on
+ // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
+ // heuristic for whether to print in color or in black-and-white, I'd
+ // really rather not.
+ fi, err := os.Stdout.Stat()
+ if err == nil {
+ m := os.ModeDevice | os.ModeCharDevice
+ IsTTY = fi.Mode()&m == m
+ }
+}
+
+// cW ("colorWrite") writes the formatted string to w, wrapped in the given
+// ANSI color codes when useColor is set and stdout is a TTY.
+func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
+ if IsTTY && useColor {
+ w.Write(color)
+ }
+ fmt.Fprintf(w, s, args...)
+ if IsTTY && useColor {
+ w.Write(reset)
+ }
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/throttle.go b/vendor/github.com/go-chi/chi/v5/middleware/throttle.go
new file mode 100644
index 0000000..8dcb944
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/throttle.go
@@ -0,0 +1,132 @@
+package middleware
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+)
+
+const (
+ errCapacityExceeded = "Server capacity exceeded."
+ errTimedOut = "Timed out while waiting for a pending request to complete."
+ errContextCanceled = "Context was canceled."
+)
+
+var (
+ defaultBacklogTimeout = time.Second * 60
+)
+
+// ThrottleOpts represents a set of throttling options.
+type ThrottleOpts struct {
+ RetryAfterFn func(ctxDone bool) time.Duration
+ Limit int
+ BacklogLimit int
+ BacklogTimeout time.Duration
+}
+
+// Throttle is a middleware that limits the number of currently processed
+// requests at a time across all users. Note: Throttle is not a per-user
+// rate-limiter; it just puts a ceiling on the number of currently in-flight
+// requests being processed from the point where the Throttle middleware is mounted.
+func Throttle(limit int) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
+}
+
+// ThrottleBacklog is a middleware that limits the number of currently processed
+// requests at a time and provides a backlog for holding a finite number of
+// pending requests.
+func ThrottleBacklog(limit, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
+ return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
+}
+
+// ThrottleWithOpts is a middleware that limits the number of currently processed requests using the passed ThrottleOpts.
+func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
+ if opts.Limit < 1 {
+ panic("chi/middleware: Throttle expects limit > 0")
+ }
+
+	if opts.BacklogLimit < 0 {
+		panic("chi/middleware: Throttle expects backlogLimit to be non-negative")
+	}
+
+ t := throttler{
+ tokens: make(chan token, opts.Limit),
+ backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit),
+ backlogTimeout: opts.BacklogTimeout,
+ retryAfterFn: opts.RetryAfterFn,
+ }
+
+ // Filling tokens.
+ for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
+ if i < opts.Limit {
+ t.tokens <- token{}
+ }
+ t.backlogTokens <- token{}
+ }
+
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ select {
+
+ case <-ctx.Done():
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusTooManyRequests)
+ return
+
+ case btok := <-t.backlogTokens:
+ timer := time.NewTimer(t.backlogTimeout)
+
+ defer func() {
+ t.backlogTokens <- btok
+ }()
+
+ select {
+ case <-timer.C:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errTimedOut, http.StatusTooManyRequests)
+ return
+ case <-ctx.Done():
+ timer.Stop()
+ t.setRetryAfterHeaderIfNeeded(w, true)
+ http.Error(w, errContextCanceled, http.StatusTooManyRequests)
+ return
+ case tok := <-t.tokens:
+ defer func() {
+ timer.Stop()
+ t.tokens <- tok
+ }()
+ next.ServeHTTP(w, r)
+ }
+ return
+
+ default:
+ t.setRetryAfterHeaderIfNeeded(w, false)
+ http.Error(w, errCapacityExceeded, http.StatusTooManyRequests)
+ return
+ }
+ }
+
+ return http.HandlerFunc(fn)
+ }
+}
+
+// token represents a request that is being processed.
+type token struct{}
+
+// throttler limits number of currently processed requests at a time.
+type throttler struct {
+ tokens chan token
+ backlogTokens chan token
+ retryAfterFn func(ctxDone bool) time.Duration
+ backlogTimeout time.Duration
+}
+
+// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
+func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
+ if t.retryAfterFn == nil {
+ return
+ }
+ w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
+}
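
A hedged usage sketch of ThrottleWithOpts based on the options defined above; the limits, timeouts, and route are illustrative and the chi import paths are assumed:

```go
package main

import (
	"net/http"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()

	// At most 50 requests are processed concurrently; up to 100 more wait in
	// the backlog for at most 30 seconds. Rejected requests get a 429 with a
	// Retry-After hint of 5 seconds. middleware.Throttle(50) is the simpler
	// form without a backlog.
	r.Use(middleware.ThrottleWithOpts(middleware.ThrottleOpts{
		Limit:          50,
		BacklogLimit:   100,
		BacklogTimeout: 30 * time.Second,
		RetryAfterFn:   func(ctxDone bool) time.Duration { return 5 * time.Second },
	}))

	r.Get("/work", func(w http.ResponseWriter, req *http.Request) {
		time.Sleep(200 * time.Millisecond) // simulate work
		w.Write([]byte("done"))
	})

	http.ListenAndServe(":3000", r)
}
```
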
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/timeout.go b/vendor/github.com/go-chi/chi/v5/middleware/timeout.go
new file mode 100644
index 0000000..8e37353
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/timeout.go
@@ -0,0 +1,49 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+// Timeout is a middleware that cancels ctx after a given timeout and returns
+// a 504 Gateway Timeout error to the client.
+//
+// It's required that you select on the ctx.Done() channel to check whether the
+// context has reached its deadline and return early; otherwise the timeout
+// signal will simply be ignored.
+//
+// i.e. a route/handler may look like:
+//
+// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
+// ctx := r.Context()
+// processTime := time.Duration(rand.Intn(4)+1) * time.Second
+//
+// select {
+// case <-ctx.Done():
+// return
+//
+// case <-time.After(processTime):
+// // The above channel simulates some hard work.
+// }
+//
+// w.Write([]byte("done"))
+// })
+//
+func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx, cancel := context.WithTimeout(r.Context(), timeout)
+ defer func() {
+ cancel()
+ if ctx.Err() == context.DeadlineExceeded {
+ w.WriteHeader(http.StatusGatewayTimeout)
+ }
+ }()
+
+ r = r.WithContext(ctx)
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
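
Complementing the handler-side example in the doc comment, a minimal sketch of mounting Timeout on a router; the timeout, route, and port are illustrative, and the import paths are assumed from the vendored layout:

```go
package main

import (
	"net/http"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.Timeout(15 * time.Second))

	r.Get("/slow", func(w http.ResponseWriter, req *http.Request) {
		select {
		case <-req.Context().Done():
			return // deadline reached: the middleware writes the 504
		case <-time.After(2 * time.Second): // simulated work
		}
		w.Write([]byte("finished"))
	})

	http.ListenAndServe(":3000", r)
}
```
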
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/url_format.go b/vendor/github.com/go-chi/chi/v5/middleware/url_format.go
new file mode 100644
index 0000000..919eb0f
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/url_format.go
@@ -0,0 +1,76 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+ "strings"
+
+ "github.com/go-chi/chi/v5"
+)
+
+var (
+ // URLFormatCtxKey is the context.Context key to store the URL format data
+ // for a request.
+ URLFormatCtxKey = &contextKey{"URLFormat"}
+)
+
+// URLFormat is a middleware that parses the url extension from a request path and stores it
+// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
+// trim the suffix from the routing path and continue routing.
+//
+// Routers should not include a url parameter for the suffix when using this middleware.
+//
+// Sample usage, for the url paths `/articles/1`, `/articles/1.json` and `/articles/1.xml`:
+//
+// func routes() http.Handler {
+// r := chi.NewRouter()
+// r.Use(middleware.URLFormat)
+//
+// r.Get("/articles/{id}", ListArticles)
+//
+// return r
+// }
+//
+// func ListArticles(w http.ResponseWriter, r *http.Request) {
+// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
+//
+// switch urlFormat {
+// case "json":
+// render.JSON(w, r, articles)
+//	case "xml":
+// render.XML(w, r, articles)
+// default:
+// render.JSON(w, r, articles)
+// }
+// }
+//
+func URLFormat(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ var format string
+ path := r.URL.Path
+
+ rctx := chi.RouteContext(r.Context())
+ if rctx != nil && rctx.RoutePath != "" {
+ path = rctx.RoutePath
+ }
+
+ if strings.Index(path, ".") > 0 {
+ base := strings.LastIndex(path, "/")
+ idx := strings.LastIndex(path[base:], ".")
+
+ if idx > 0 {
+ idx += base
+ format = path[idx+1:]
+
+ rctx.RoutePath = path[:idx]
+ }
+ }
+
+ r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
+
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+}
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/value.go b/vendor/github.com/go-chi/chi/v5/middleware/value.go
new file mode 100644
index 0000000..a9dfd43
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/value.go
@@ -0,0 +1,17 @@
+package middleware
+
+import (
+ "context"
+ "net/http"
+)
+
+// WithValue is a middleware that sets a given key/value in a context chain.
+func WithValue(key, val interface{}) func(next http.Handler) http.Handler {
+ return func(next http.Handler) http.Handler {
+ fn := func(w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(context.WithValue(r.Context(), key, val))
+ next.ServeHTTP(w, r)
+ }
+ return http.HandlerFunc(fn)
+ }
+}
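
A minimal sketch of WithValue injecting a request-scoped value; appConfig and ctxKey are hypothetical types introduced only for the example:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

// appConfig is a hypothetical request-scoped dependency.
type appConfig struct{ Env string }

// ctxKey is an unexported key type, avoiding collisions with other packages.
type ctxKey struct{}

func main() {
	r := chi.NewRouter()
	r.Use(middleware.WithValue(ctxKey{}, &appConfig{Env: "prod"}))

	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		cfg, _ := req.Context().Value(ctxKey{}).(*appConfig)
		fmt.Fprintf(w, "env: %s\n", cfg.Env)
	})

	http.ListenAndServe(":3000", r)
}
```
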
diff --git a/vendor/github.com/go-chi/chi/v5/middleware/wrap_writer.go b/vendor/github.com/go-chi/chi/v5/middleware/wrap_writer.go
new file mode 100644
index 0000000..cf5c44d
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/middleware/wrap_writer.go
@@ -0,0 +1,219 @@
+package middleware
+
+// The original work was derived from Goji's middleware, source:
+// https://github.com/zenazn/goji/tree/master/web/middleware
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
+// hook into various parts of the response process.
+func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
+ _, fl := w.(http.Flusher)
+
+ bw := basicWriter{ResponseWriter: w}
+
+ if protoMajor == 2 {
+ _, ps := w.(http.Pusher)
+ if fl && ps {
+ return &http2FancyWriter{bw}
+ }
+ } else {
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ if fl && hj && rf {
+ return &httpFancyWriter{bw}
+ }
+ if fl && hj {
+ return &flushHijackWriter{bw}
+ }
+ if hj {
+ return &hijackWriter{bw}
+ }
+ }
+
+ if fl {
+ return &flushWriter{bw}
+ }
+
+ return &bw
+}
+
+// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
+// into various parts of the response process.
+type WrapResponseWriter interface {
+ http.ResponseWriter
+ // Status returns the HTTP status of the request, or 0 if one has not
+ // yet been sent.
+ Status() int
+ // BytesWritten returns the total number of bytes sent to the client.
+ BytesWritten() int
+ // Tee causes the response body to be written to the given io.Writer in
+ // addition to proxying the writes through. Only one io.Writer can be
+ // tee'd to at once: setting a second one will overwrite the first.
+ // Writes will be sent to the proxy before being written to this
+ // io.Writer. It is illegal for the tee'd writer to be modified
+ // concurrently with writes.
+ Tee(io.Writer)
+ // Unwrap returns the original proxied target.
+ Unwrap() http.ResponseWriter
+}
+
+// basicWriter wraps an http.ResponseWriter and implements the minimal
+// WrapResponseWriter behavior: status and byte-count tracking plus an optional tee.
+type basicWriter struct {
+ http.ResponseWriter
+ wroteHeader bool
+ code int
+ bytes int
+ tee io.Writer
+}
+
+func (b *basicWriter) WriteHeader(code int) {
+ if !b.wroteHeader {
+ b.code = code
+ b.wroteHeader = true
+ b.ResponseWriter.WriteHeader(code)
+ }
+}
+
+func (b *basicWriter) Write(buf []byte) (int, error) {
+ b.maybeWriteHeader()
+ n, err := b.ResponseWriter.Write(buf)
+ if b.tee != nil {
+ _, err2 := b.tee.Write(buf[:n])
+ // Prefer errors generated by the proxied writer.
+ if err == nil {
+ err = err2
+ }
+ }
+ b.bytes += n
+ return n, err
+}
+
+func (b *basicWriter) maybeWriteHeader() {
+ if !b.wroteHeader {
+ b.WriteHeader(http.StatusOK)
+ }
+}
+
+func (b *basicWriter) Status() int {
+ return b.code
+}
+
+func (b *basicWriter) BytesWritten() int {
+ return b.bytes
+}
+
+func (b *basicWriter) Tee(w io.Writer) {
+ b.tee = w
+}
+
+func (b *basicWriter) Unwrap() http.ResponseWriter {
+ return b.ResponseWriter
+}
+
+// flushWriter is a basicWriter that also passes Flush through to the
+// underlying http.Flusher.
+type flushWriter struct {
+ basicWriter
+}
+
+func (f *flushWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &flushWriter{}
+
+// hijackWriter is a basicWriter that also passes Hijack through to the
+// underlying http.Hijacker.
+type hijackWriter struct {
+ basicWriter
+}
+
+func (f *hijackWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+var _ http.Hijacker = &hijackWriter{}
+
+// flushHijackWriter is a basicWriter that supports both http.Flusher and
+// http.Hijacker.
+type flushHijackWriter struct {
+ basicWriter
+}
+
+func (f *flushHijackWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+func (f *flushHijackWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+var _ http.Flusher = &flushHijackWriter{}
+var _ http.Hijacker = &flushHijackWriter{}
+
+// httpFancyWriter is a HTTP writer that additionally satisfies
+// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the proxied object.
+type httpFancyWriter struct {
+ basicWriter
+}
+
+func (f *httpFancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+ return hj.Hijack()
+}
+
+func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
+ return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
+ if f.basicWriter.tee != nil {
+ n, err := io.Copy(&f.basicWriter, r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+ }
+ rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
+ f.basicWriter.maybeWriteHeader()
+ n, err := rf.ReadFrom(r)
+ f.basicWriter.bytes += int(n)
+ return n, err
+}
+
+var _ http.Flusher = &httpFancyWriter{}
+var _ http.Hijacker = &httpFancyWriter{}
+var _ http.Pusher = &http2FancyWriter{}
+var _ io.ReaderFrom = &httpFancyWriter{}
+
+// http2FancyWriter is a HTTP2 writer that additionally satisfies
+// http.Flusher, and io.ReaderFrom. It exists for the common case
+// of wrapping the http.ResponseWriter that package http gives you, in order to
+// make the proxied object support the full method set of the proxied object.
+type http2FancyWriter struct {
+ basicWriter
+}
+
+func (f *http2FancyWriter) Flush() {
+ f.wroteHeader = true
+ fl := f.basicWriter.ResponseWriter.(http.Flusher)
+ fl.Flush()
+}
+
+var _ http.Flusher = &http2FancyWriter{}
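
A sketch of how NewWrapResponseWriter is typically consumed: a hypothetical statusLogger middleware wraps the writer so it can report the status code and byte count after the handler runs (the logging format, route, and port are illustrative):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

// statusLogger wraps the ResponseWriter to observe status and bytes written.
func statusLogger(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
		start := time.Now()
		defer func() {
			log.Printf("%s %s -> %d (%d bytes) in %s",
				r.Method, r.URL.Path, ww.Status(), ww.BytesWritten(), time.Since(start))
		}()
		next.ServeHTTP(ww, r)
	})
}

func main() {
	r := chi.NewRouter()
	r.Use(statusLogger)
	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("hello"))
	})
	log.Fatal(http.ListenAndServe(":3000", r))
}
```
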
diff --git a/vendor/github.com/go-chi/chi/v5/mux.go b/vendor/github.com/go-chi/chi/v5/mux.go
new file mode 100644
index 0000000..47e64cf
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/mux.go
@@ -0,0 +1,487 @@
+package chi
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+)
+
+var _ Router = &Mux{}
+
+// Mux is a simple HTTP route multiplexer that parses a request path,
+// records any URL params, and executes an end handler. It implements
+// the http.Handler interface and is friendly with the standard library.
+//
+// Mux is designed to be fast, minimal and offer a powerful API for building
+// modular and composable HTTP services with a large set of handlers. It's
+// particularly useful for writing large REST API services that break a handler
+// into many smaller parts composed of middlewares and end handlers.
+type Mux struct {
+ // The computed mux handler made of the chained middleware stack and
+ // the tree router
+ handler http.Handler
+
+ // The radix trie router
+ tree *node
+
+ // Custom method not allowed handler
+ methodNotAllowedHandler http.HandlerFunc
+
+ // A reference to the parent mux used by subrouters when mounting
+ // to a parent mux
+ parent *Mux
+
+ // Routing context pool
+ pool *sync.Pool
+
+ // Custom route not found handler
+ notFoundHandler http.HandlerFunc
+
+ // The middleware stack
+ middlewares []func(http.Handler) http.Handler
+
+ // Controls the behaviour of middleware chain generation when a mux
+ // is registered as an inline group inside another mux.
+ inline bool
+}
+
+// NewMux returns a newly initialized Mux object that implements the Router
+// interface.
+func NewMux() *Mux {
+ mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
+ mux.pool.New = func() interface{} {
+ return NewRouteContext()
+ }
+ return mux
+}
+
+// ServeHTTP is the single method of the http.Handler interface that makes
+// Mux interoperable with the standard library. It uses a sync.Pool to get and
+// reuse routing contexts for each request.
+func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Ensure the mux has some routes defined
+ if mx.handler == nil {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Check if a routing context already exists from a parent router.
+ rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
+ if rctx != nil {
+ mx.handler.ServeHTTP(w, r)
+ return
+ }
+
+ // Fetch a RouteContext object from the sync pool, and call the computed
+ // mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
+ // Once the request is finished, reset the routing context and put it back
+ // into the pool for reuse from another request.
+ rctx = mx.pool.Get().(*Context)
+ rctx.Reset()
+ rctx.Routes = mx
+ rctx.parentCtx = r.Context()
+
+ // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
+ r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
+
+	// Serve the request and once it's done, put the request context back in the sync pool
+ mx.handler.ServeHTTP(w, r)
+ mx.pool.Put(rctx)
+}
+
+// Use appends a middleware handler to the Mux middleware stack.
+//
+// The middleware stack for any Mux will execute before searching for a matching
+// route to a specific handler, which provides opportunity to respond early,
+// change the course of the request execution, or set request-scoped values for
+// the next http.Handler.
+func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
+ if mx.handler != nil {
+ panic("chi: all middlewares must be defined before routes on a mux")
+ }
+ mx.middlewares = append(mx.middlewares, middlewares...)
+}
+
+// Handle adds the route `pattern` that matches any http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Handle(pattern string, handler http.Handler) {
+ mx.handle(mALL, pattern, handler)
+}
+
+// HandleFunc adds the route `pattern` that matches any http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mALL, pattern, handlerFn)
+}
+
+// Method adds the route `pattern` that matches `method` http method to
+// execute the `handler` http.Handler.
+func (mx *Mux) Method(method, pattern string, handler http.Handler) {
+ m, ok := methodMap[strings.ToUpper(method)]
+ if !ok {
+ panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
+ }
+ mx.handle(m, pattern, handler)
+}
+
+// MethodFunc adds the route `pattern` that matches `method` http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
+ mx.Method(method, pattern, handlerFn)
+}
+
+// Connect adds the route `pattern` that matches a CONNECT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mCONNECT, pattern, handlerFn)
+}
+
+// Delete adds the route `pattern` that matches a DELETE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mDELETE, pattern, handlerFn)
+}
+
+// Get adds the route `pattern` that matches a GET http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mGET, pattern, handlerFn)
+}
+
+// Head adds the route `pattern` that matches a HEAD http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mHEAD, pattern, handlerFn)
+}
+
+// Options adds the route `pattern` that matches an OPTIONS http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mOPTIONS, pattern, handlerFn)
+}
+
+// Patch adds the route `pattern` that matches a PATCH http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPATCH, pattern, handlerFn)
+}
+
+// Post adds the route `pattern` that matches a POST http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPOST, pattern, handlerFn)
+}
+
+// Put adds the route `pattern` that matches a PUT http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mPUT, pattern, handlerFn)
+}
+
+// Trace adds the route `pattern` that matches a TRACE http method to
+// execute the `handlerFn` http.HandlerFunc.
+func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
+ mx.handle(mTRACE, pattern, handlerFn)
+}
+
+// NotFound sets a custom http.HandlerFunc for routing paths that could
+// not be found. The default 404 handler is `http.NotFound`.
+func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
+ // Build NotFound handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the notFoundHandler from this point forward
+ m.notFoundHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.notFoundHandler == nil {
+ subMux.NotFound(hFn)
+ }
+ })
+}
+
+// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
+// method is unresolved. The default handler returns a 405 with an empty body.
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
+ // Build MethodNotAllowed handler chain
+ m := mx
+ hFn := handlerFn
+ if mx.inline && mx.parent != nil {
+ m = mx.parent
+ hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
+ }
+
+ // Update the methodNotAllowedHandler from this point forward
+ m.methodNotAllowedHandler = hFn
+ m.updateSubRoutes(func(subMux *Mux) {
+ if subMux.methodNotAllowedHandler == nil {
+ subMux.MethodNotAllowed(hFn)
+ }
+ })
+}
+
+// With adds inline middlewares for an endpoint handler.
+func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
+	// As in handle(), we must build the mux handler once additional
+	// middleware registration isn't allowed for this stack (which is the case now).
+ if !mx.inline && mx.handler == nil {
+ mx.updateRouteHandler()
+ }
+
+	// Copy middlewares from parent inline muxes
+ var mws Middlewares
+ if mx.inline {
+ mws = make(Middlewares, len(mx.middlewares))
+ copy(mws, mx.middlewares)
+ }
+ mws = append(mws, middlewares...)
+
+ im := &Mux{
+ pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
+ notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
+ }
+
+ return im
+}
+
+// Group creates a new inline-Mux with a fresh middleware stack. It's useful
+// for a group of handlers along the same routing path that use an additional
+// set of middlewares. See _examples/.
+func (mx *Mux) Group(fn func(r Router)) Router {
+ im := mx.With().(*Mux)
+ if fn != nil {
+ fn(im)
+ }
+ return im
+}
+
+// Route creates a new Mux with a fresh middleware stack and mounts it
+// along the `pattern` as a subrouter. Effectively, this is a short-hand
+// call to Mount. See _examples/.
+func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
+ if fn == nil {
+ panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern))
+ }
+ subRouter := NewRouter()
+ fn(subRouter)
+ mx.Mount(pattern, subRouter)
+ return subRouter
+}
+
+// Mount attaches another http.Handler or chi Router as a subrouter along a routing
+// path. It's very useful for splitting up a large API into many independent routers
+// and composing them into a single service using Mount. See _examples/.
+//
+// Note that Mount() simply sets a wildcard along the `pattern` that will continue
+// routing at the `handler`, which in most cases is another chi.Router. As a result,
+// if you define two Mount() routes on the exact same pattern the mount will panic.
+func (mx *Mux) Mount(pattern string, handler http.Handler) {
+ if handler == nil {
+ panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern))
+ }
+
+ // Provide runtime safety for ensuring a pattern isn't mounted on an existing
+ // routing pattern.
+ if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
+ panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
+ }
+
+	// Assign the sub-router the parent's not-found and method-not-allowed handlers if not specified.
+ subr, ok := handler.(*Mux)
+ if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
+ subr.NotFound(mx.notFoundHandler)
+ }
+ if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
+ subr.MethodNotAllowed(mx.methodNotAllowedHandler)
+ }
+
+ mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ rctx := RouteContext(r.Context())
+
+ // shift the url path past the previous subrouter
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+
+ // reset the wildcard URLParam which connects the subrouter
+ n := len(rctx.URLParams.Keys) - 1
+ if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n {
+ rctx.URLParams.Values[n] = ""
+ }
+
+ handler.ServeHTTP(w, r)
+ })
+
+ if pattern == "" || pattern[len(pattern)-1] != '/' {
+ mx.handle(mALL|mSTUB, pattern, mountHandler)
+ mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
+ pattern += "/"
+ }
+
+ method := mALL
+ subroutes, _ := handler.(Routes)
+ if subroutes != nil {
+ method |= mSTUB
+ }
+ n := mx.handle(method, pattern+"*", mountHandler)
+
+ if subroutes != nil {
+ n.subroutes = subroutes
+ }
+}
+
+// Routes returns a slice of routing information from the tree,
+// useful for traversing available routes of a router.
+func (mx *Mux) Routes() []Route {
+ return mx.tree.routes()
+}
+
+// Middlewares returns a slice of middleware handler functions.
+func (mx *Mux) Middlewares() Middlewares {
+ return mx.middlewares
+}
+
+// Match searches the routing tree for a handler that matches the method/path.
+// It's similar to routing a http request, but without executing the handler
+// thereafter.
+//
+// Note: the *Context state is updated during execution, so manage
+// the state carefully or make a NewRouteContext().
+func (mx *Mux) Match(rctx *Context, method, path string) bool {
+ m, ok := methodMap[method]
+ if !ok {
+ return false
+ }
+
+ node, _, h := mx.tree.FindRoute(rctx, m, path)
+
+ if node != nil && node.subroutes != nil {
+ rctx.RoutePath = mx.nextRoutePath(rctx)
+ return node.subroutes.Match(rctx, method, rctx.RoutePath)
+ }
+
+ return h != nil
+}
+
+// NotFoundHandler returns the default Mux 404 responder whenever a route
+// cannot be found.
+func (mx *Mux) NotFoundHandler() http.HandlerFunc {
+ if mx.notFoundHandler != nil {
+ return mx.notFoundHandler
+ }
+ return http.NotFound
+}
+
+// MethodNotAllowedHandler returns the default Mux 405 responder whenever
+// a method cannot be resolved for a route.
+func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
+ if mx.methodNotAllowedHandler != nil {
+ return mx.methodNotAllowedHandler
+ }
+ return methodNotAllowedHandler
+}
+
+// handle registers a http.Handler in the routing tree for a particular http method
+// and routing pattern.
+func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
+ if len(pattern) == 0 || pattern[0] != '/' {
+ panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
+ }
+
+ // Build the computed routing handler for this routing pattern.
+ if !mx.inline && mx.handler == nil {
+ mx.updateRouteHandler()
+ }
+
+ // Build endpoint handler with inline middlewares for the route
+ var h http.Handler
+ if mx.inline {
+ mx.handler = http.HandlerFunc(mx.routeHTTP)
+ h = Chain(mx.middlewares...).Handler(handler)
+ } else {
+ h = handler
+ }
+
+ // Add the endpoint to the tree and return the node
+ return mx.tree.InsertRoute(method, pattern, h)
+}
+
+// routeHTTP routes a http.Request through the Mux routing tree to serve
+// the matching handler for a particular http method.
+func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Grab the route context object
+ rctx := r.Context().Value(RouteCtxKey).(*Context)
+
+ // The request routing path
+ routePath := rctx.RoutePath
+ if routePath == "" {
+ if r.URL.RawPath != "" {
+ routePath = r.URL.RawPath
+ } else {
+ routePath = r.URL.Path
+ }
+ if routePath == "" {
+ routePath = "/"
+ }
+ }
+
+ // Check if method is supported by chi
+ if rctx.RouteMethod == "" {
+ rctx.RouteMethod = r.Method
+ }
+ method, ok := methodMap[rctx.RouteMethod]
+ if !ok {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ return
+ }
+
+ // Find the route
+ if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
+ h.ServeHTTP(w, r)
+ return
+ }
+ if rctx.methodNotAllowed {
+ mx.MethodNotAllowedHandler().ServeHTTP(w, r)
+ } else {
+ mx.NotFoundHandler().ServeHTTP(w, r)
+ }
+}
+
+func (mx *Mux) nextRoutePath(rctx *Context) string {
+ routePath := "/"
+ nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
+ if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
+ routePath = "/" + rctx.routeParams.Values[nx]
+ }
+ return routePath
+}
+
+// Recursively update data on child routers.
+func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
+ for _, r := range mx.tree.routes() {
+ subMux, ok := r.SubRoutes.(*Mux)
+ if !ok {
+ continue
+ }
+ fn(subMux)
+ }
+}
+
+// updateRouteHandler builds the single mux handler that is a chain of the middleware
+// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
+// point, no other middlewares can be registered on this Mux's stack. But you can still
+// compose additional middlewares via Group()'s or using a chained middleware handler.
+func (mx *Mux) updateRouteHandler() {
+ mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
+}
+
+// methodNotAllowedHandler is a helper function to respond with a 405,
+// method not allowed.
+func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(405)
+ w.Write(nil)
+}
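
To tie the Mux API above together, a small illustrative router using Use, Route, Mount, and URL params; the handlers, paths, and port are placeholders, not part of this change:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	r := chi.NewRouter()
	r.Use(middleware.RequestID)
	r.Use(middleware.StripSlashes)

	r.Get("/", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("home"))
	})

	// Route creates a subrouter mounted at /articles with its own middleware stack.
	r.Route("/articles", func(r chi.Router) {
		r.Get("/", func(w http.ResponseWriter, req *http.Request) {
			w.Write([]byte("article index"))
		})
		r.Get("/{id}", func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintf(w, "article %s\n", chi.URLParam(req, "id"))
		})
	})

	// Mount attaches an independently built router under /admin.
	admin := chi.NewRouter()
	admin.Get("/status", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("ok"))
	})
	r.Mount("/admin", admin)

	http.ListenAndServe(":3000", r)
}
```
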
diff --git a/vendor/github.com/go-chi/chi/v5/tree.go b/vendor/github.com/go-chi/chi/v5/tree.go
new file mode 100644
index 0000000..4189b52
--- /dev/null
+++ b/vendor/github.com/go-chi/chi/v5/tree.go
@@ -0,0 +1,866 @@
+package chi
+
+// The radix tree implementation below is based on the original work by
+// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
+// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+type methodTyp uint
+
+const (
+ mSTUB methodTyp = 1 << iota
+ mCONNECT
+ mDELETE
+ mGET
+ mHEAD
+ mOPTIONS
+ mPATCH
+ mPOST
+ mPUT
+ mTRACE
+)
+
+var mALL = mCONNECT | mDELETE | mGET | mHEAD |
+ mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
+
+var methodMap = map[string]methodTyp{
+ http.MethodConnect: mCONNECT,
+ http.MethodDelete: mDELETE,
+ http.MethodGet: mGET,
+ http.MethodHead: mHEAD,
+ http.MethodOptions: mOPTIONS,
+ http.MethodPatch: mPATCH,
+ http.MethodPost: mPOST,
+ http.MethodPut: mPUT,
+ http.MethodTrace: mTRACE,
+}
+
+// RegisterMethod adds support for custom HTTP method handlers, available
+// via Router#Method and Router#MethodFunc
+func RegisterMethod(method string) {
+ if method == "" {
+ return
+ }
+ method = strings.ToUpper(method)
+ if _, ok := methodMap[method]; ok {
+ return
+ }
+ n := len(methodMap)
+ if n > strconv.IntSize-2 {
+ panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
+ }
+ mt := methodTyp(2 << n)
+ methodMap[method] = mt
+ mALL |= mt
+}
+
+type nodeTyp uint8
+
+const (
+ ntStatic nodeTyp = iota // /home
+ ntRegexp // /{id:[0-9]+}
+ ntParam // /{user}
+ ntCatchAll // /api/v1/*
+)
+
+type node struct {
+ // subroutes on the leaf node
+ subroutes Routes
+
+ // regexp matcher for regexp nodes
+ rex *regexp.Regexp
+
+ // HTTP handler endpoints on the leaf node
+ endpoints endpoints
+
+ // prefix is the common prefix we ignore
+ prefix string
+
+ // child nodes should be stored in-order for iteration,
+ // in groups of the node type.
+ children [ntCatchAll + 1]nodes
+
+ // first byte of the child prefix
+ tail byte
+
+ // node type: static, regexp, param, catchAll
+ typ nodeTyp
+
+ // first byte of the prefix
+ label byte
+}
+
+// endpoints is a mapping of http method constants to handlers
+// for a given route.
+type endpoints map[methodTyp]*endpoint
+
+type endpoint struct {
+ // endpoint handler
+ handler http.Handler
+
+ // pattern is the routing pattern for handler nodes
+ pattern string
+
+ // parameter keys recorded on handler nodes
+ paramKeys []string
+}
+
+func (s endpoints) Value(method methodTyp) *endpoint {
+ mh, ok := s[method]
+ if !ok {
+ mh = &endpoint{}
+ s[method] = mh
+ }
+ return mh
+}
+
+func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
+ var parent *node
+ search := pattern
+
+ for {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ // Insert or update the node's leaf handler
+ n.setEndpoint(method, handler, pattern)
+ return n
+ }
+
+		// We're going to be searching for a wild node next;
+		// in this case, we need to get the tail
+ var label = search[0]
+ var segTail byte
+ var segEndIdx int
+ var segTyp nodeTyp
+ var segRexpat string
+ if label == '{' || label == '*' {
+ segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
+ }
+
+ var prefix string
+ if segTyp == ntRegexp {
+ prefix = segRexpat
+ }
+
+ // Look for the edge to attach to
+ parent = n
+ n = n.getEdge(segTyp, label, segTail, prefix)
+
+ // No edge, create one
+ if n == nil {
+ child := &node{label: label, tail: segTail, prefix: search}
+ hn := parent.addChild(child, search)
+ hn.setEndpoint(method, handler, pattern)
+
+ return hn
+ }
+
+ // Found an edge to match the pattern
+
+ if n.typ > ntStatic {
+ // We found a param node, trim the param from the search path and continue.
+ // This param/wild pattern segment would already be on the tree from a previous
+ // call to addChild when creating a new node.
+ search = search[segEndIdx:]
+ continue
+ }
+
+ // Static nodes fall below here.
+ // Determine longest prefix of the search key on match.
+ commonPrefix := longestPrefix(search, n.prefix)
+ if commonPrefix == len(n.prefix) {
+ // the common prefix is as long as the current node's prefix we're attempting to insert.
+ // keep the search going.
+ search = search[commonPrefix:]
+ continue
+ }
+
+ // Split the node
+ child := &node{
+ typ: ntStatic,
+ prefix: search[:commonPrefix],
+ }
+ parent.replaceChild(search[0], segTail, child)
+
+ // Restore the existing node
+ n.label = n.prefix[commonPrefix]
+ n.prefix = n.prefix[commonPrefix:]
+ child.addChild(n, n.prefix)
+
+ // If the new key is a subset, set the method/handler on this node and finish.
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ child.setEndpoint(method, handler, pattern)
+ return child
+ }
+
+ // Create a new edge for the node
+ subchild := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn := child.addChild(subchild, search)
+ hn.setEndpoint(method, handler, pattern)
+ return hn
+ }
+}
+
+// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
+// For a URL router like chi's, we split the static, param, regexp and wildcard segments
+// into different nodes. In addition, addChild will recursively call itself until every
+// pattern segment is added to the url pattern tree as individual nodes, depending on type.
+func (n *node) addChild(child *node, prefix string) *node {
+ search := prefix
+
+ // handler leaf node added to the tree is the child.
+ // this may be overridden later down the flow
+ hn := child
+
+ // Parse next segment
+ segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
+
+ // Add child depending on next up segment
+ switch segTyp {
+
+ case ntStatic:
+ // Search prefix is all static (that is, has no params in path)
+ // noop
+
+ default:
+ // Search prefix contains a param, regexp or wildcard
+
+ if segTyp == ntRegexp {
+ rex, err := regexp.Compile(segRexpat)
+ if err != nil {
+ panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
+ }
+ child.prefix = segRexpat
+ child.rex = rex
+ }
+
+ if segStartIdx == 0 {
+ // Route starts with a param
+ child.typ = segTyp
+
+ if segTyp == ntCatchAll {
+ segStartIdx = -1
+ } else {
+ segStartIdx = segEndIdx
+ }
+ if segStartIdx < 0 {
+ segStartIdx = len(search)
+ }
+ child.tail = segTail // for params, we set the tail
+
+ if segStartIdx != len(search) {
+				// add static edge for the remaining part, split the end.
+				// it's not possible to have adjacent param nodes, so it's certainly
+				// going to be a static node next.
+
+ search = search[segStartIdx:] // advance search position
+
+ nn := &node{
+ typ: ntStatic,
+ label: search[0],
+ prefix: search,
+ }
+ hn = child.addChild(nn, search)
+ }
+
+ } else if segStartIdx > 0 {
+ // Route has some param
+
+ // starts with a static segment
+ child.typ = ntStatic
+ child.prefix = search[:segStartIdx]
+ child.rex = nil
+
+ // add the param edge node
+ search = search[segStartIdx:]
+
+ nn := &node{
+ typ: segTyp,
+ label: search[0],
+ tail: segTail,
+ }
+ hn = child.addChild(nn, search)
+
+ }
+ }
+
+ n.children[child.typ] = append(n.children[child.typ], child)
+ n.children[child.typ].Sort()
+ return hn
+}
+
+func (n *node) replaceChild(label, tail byte, child *node) {
+ for i := 0; i < len(n.children[child.typ]); i++ {
+ if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
+ n.children[child.typ][i] = child
+ n.children[child.typ][i].label = label
+ n.children[child.typ][i].tail = tail
+ return
+ }
+ }
+ panic("chi: replacing missing child")
+}
+
+func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
+ nds := n.children[ntyp]
+ for i := 0; i < len(nds); i++ {
+ if nds[i].label == label && nds[i].tail == tail {
+ if ntyp == ntRegexp && nds[i].prefix != prefix {
+ continue
+ }
+ return nds[i]
+ }
+ }
+ return nil
+}
+
+func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
+ // Set the handler for the method type on the node
+ if n.endpoints == nil {
+ n.endpoints = make(endpoints)
+ }
+
+ paramKeys := patParamKeys(pattern)
+
+ if method&mSTUB == mSTUB {
+ n.endpoints.Value(mSTUB).handler = handler
+ }
+ if method&mALL == mALL {
+ h := n.endpoints.Value(mALL)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ for _, m := range methodMap {
+ h := n.endpoints.Value(m)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+ } else {
+ h := n.endpoints.Value(method)
+ h.handler = handler
+ h.pattern = pattern
+ h.paramKeys = paramKeys
+ }
+}
+
+func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
+ // Reset the context routing pattern and params
+ rctx.routePattern = ""
+ rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
+ rctx.routeParams.Values = rctx.routeParams.Values[:0]
+
+ // Find the routing handlers for the path
+ rn := n.findRoute(rctx, method, path)
+ if rn == nil {
+ return nil, nil, nil
+ }
+
+ // Record the routing params in the request lifecycle
+ rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
+ rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
+
+ // Record the routing pattern in the request lifecycle
+ if rn.endpoints[method].pattern != "" {
+ rctx.routePattern = rn.endpoints[method].pattern
+ rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
+ }
+
+ return rn, rn.endpoints, rn.endpoints[method].handler
+}
+
+// Recursive edge traversal by checking all nodeTyp groups along the way.
+// It's like searching through a multi-dimensional radix trie.
+func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
+ nn := n
+ search := path
+
+ for t, nds := range nn.children {
+ ntyp := nodeTyp(t)
+ if len(nds) == 0 {
+ continue
+ }
+
+ var xn *node
+ xsearch := search
+
+ var label byte
+ if search != "" {
+ label = search[0]
+ }
+
+ switch ntyp {
+ case ntStatic:
+ xn = nds.findEdge(label)
+ if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
+ continue
+ }
+ xsearch = xsearch[len(xn.prefix):]
+
+ case ntParam, ntRegexp:
+ // short-circuit and return no matching route for empty param values
+ if xsearch == "" {
+ continue
+ }
+
+ // serially loop through each node grouped by the tail delimiter
+ for idx := 0; idx < len(nds); idx++ {
+ xn = nds[idx]
+
+ // label for param nodes is the delimiter byte
+ p := strings.IndexByte(xsearch, xn.tail)
+
+ if p < 0 {
+ if xn.tail == '/' {
+ p = len(xsearch)
+ } else {
+ continue
+ }
+ } else if ntyp == ntRegexp && p == 0 {
+ continue
+ }
+
+ if ntyp == ntRegexp && xn.rex != nil {
+ if !xn.rex.MatchString(xsearch[:p]) {
+ continue
+ }
+ } else if strings.IndexByte(xsearch[:p], '/') != -1 {
+ // avoid a match across path segments
+ continue
+ }
+
+ prevlen := len(rctx.routeParams.Values)
+ rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
+ xsearch = xsearch[p:]
+
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node on this branch
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // not found on this branch, reset vars
+ rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
+ xsearch = search
+ }
+
+ rctx.routeParams.Values = append(rctx.routeParams.Values, "")
+
+ default:
+ // catch-all nodes
+ rctx.routeParams.Values = append(rctx.routeParams.Values, search)
+ xn = nds[0]
+ xsearch = ""
+ }
+
+ if xn == nil {
+ continue
+ }
+
+ // did we find it yet?
+ if len(xsearch) == 0 {
+ if xn.isLeaf() {
+ h := xn.endpoints[method]
+ if h != nil && h.handler != nil {
+ rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
+ return xn
+ }
+
+ // flag that the routing context found a route, but not a corresponding
+ // supported method
+ rctx.methodNotAllowed = true
+ }
+ }
+
+ // recursively find the next node..
+ fin := xn.findRoute(rctx, method, xsearch)
+ if fin != nil {
+ return fin
+ }
+
+ // Did not find final handler, let's remove the param here if it was set
+ if xn.typ > ntStatic {
+ if len(rctx.routeParams.Values) > 0 {
+ rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
+ }
+ }
+
+ }
+
+ return nil
+}
+
+func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
+ nds := n.children[ntyp]
+ num := len(nds)
+ idx := 0
+
+ switch ntyp {
+ case ntStatic, ntParam, ntRegexp:
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > nds[idx].label {
+ i = idx + 1
+ } else if label < nds[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if nds[idx].label != label {
+ return nil
+ }
+ return nds[idx]
+
+ default: // catch all
+ return nds[idx]
+ }
+}
+
+func (n *node) isLeaf() bool {
+ return n.endpoints != nil
+}
+
+func (n *node) findPattern(pattern string) bool {
+ nn := n
+ for _, nds := range nn.children {
+ if len(nds) == 0 {
+ continue
+ }
+
+ n = nn.findEdge(nds[0].typ, pattern[0])
+ if n == nil {
+ continue
+ }
+
+ var idx int
+ var xpattern string
+
+ switch n.typ {
+ case ntStatic:
+ idx = longestPrefix(pattern, n.prefix)
+ if idx < len(n.prefix) {
+ continue
+ }
+
+ case ntParam, ntRegexp:
+ idx = strings.IndexByte(pattern, '}') + 1
+
+ case ntCatchAll:
+ idx = longestPrefix(pattern, "*")
+
+ default:
+ panic("chi: unknown node type")
+ }
+
+ xpattern = pattern[idx:]
+ if len(xpattern) == 0 {
+ return true
+ }
+
+ return n.findPattern(xpattern)
+ }
+ return false
+}
+
+func (n *node) routes() []Route {
+ rts := []Route{}
+
+ n.walk(func(eps endpoints, subroutes Routes) bool {
+ if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
+ return false
+ }
+
+ // Group methodHandlers by unique patterns
+ pats := make(map[string]endpoints)
+
+ for mt, h := range eps {
+ if h.pattern == "" {
+ continue
+ }
+ p, ok := pats[h.pattern]
+ if !ok {
+ p = endpoints{}
+ pats[h.pattern] = p
+ }
+ p[mt] = h
+ }
+
+ for p, mh := range pats {
+ hs := make(map[string]http.Handler)
+ if mh[mALL] != nil && mh[mALL].handler != nil {
+ hs["*"] = mh[mALL].handler
+ }
+
+ for mt, h := range mh {
+ if h.handler == nil {
+ continue
+ }
+ m := methodTypString(mt)
+ if m == "" {
+ continue
+ }
+ hs[m] = h.handler
+ }
+
+ rt := Route{subroutes, hs, p}
+ rts = append(rts, rt)
+ }
+
+ return false
+ })
+
+ return rts
+}
+
+func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
+ // Visit the leaf values if any
+ if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, ns := range n.children {
+ for _, cn := range ns {
+ if cn.walk(fn) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// patNextSegment returns the next segment details from a pattern:
+// node type, param key, regexp string, param tail byte, param starting index, param ending index
+func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
+ ps := strings.Index(pattern, "{")
+ ws := strings.Index(pattern, "*")
+
+ if ps < 0 && ws < 0 {
+ return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
+ }
+
+ // Sanity check
+ if ps >= 0 && ws >= 0 && ws < ps {
+ panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
+ }
+
+ var tail byte = '/' // Default endpoint tail to / byte
+
+ if ps >= 0 {
+ // Param/Regexp pattern is next
+ nt := ntParam
+
+		// Read to the closing } taking into account opens and closes in curly brace count (cc)
+ cc := 0
+ pe := ps
+ for i, c := range pattern[ps:] {
+ if c == '{' {
+ cc++
+ } else if c == '}' {
+ cc--
+ if cc == 0 {
+ pe = ps + i
+ break
+ }
+ }
+ }
+ if pe == ps {
+ panic("chi: route param closing delimiter '}' is missing")
+ }
+
+ key := pattern[ps+1 : pe]
+ pe++ // set end to next position
+
+ if pe < len(pattern) {
+ tail = pattern[pe]
+ }
+
+ var rexpat string
+ if idx := strings.Index(key, ":"); idx >= 0 {
+ nt = ntRegexp
+ rexpat = key[idx+1:]
+ key = key[:idx]
+ }
+
+ if len(rexpat) > 0 {
+ if rexpat[0] != '^' {
+ rexpat = "^" + rexpat
+ }
+ if rexpat[len(rexpat)-1] != '$' {
+ rexpat += "$"
+ }
+ }
+
+ return nt, key, rexpat, tail, ps, pe
+ }
+
+ // Wildcard pattern as finale
+ if ws < len(pattern)-1 {
+ panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
+ }
+ return ntCatchAll, "*", "", 0, ws, len(pattern)
+}
+
+func patParamKeys(pattern string) []string {
+ pat := pattern
+ paramKeys := []string{}
+ for {
+ ptyp, paramKey, _, _, _, e := patNextSegment(pat)
+ if ptyp == ntStatic {
+ return paramKeys
+ }
+ for i := 0; i < len(paramKeys); i++ {
+ if paramKeys[i] == paramKey {
+ panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
+ }
+ }
+ paramKeys = append(paramKeys, paramKey)
+ pat = pat[e:]
+ }
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+func methodTypString(method methodTyp) string {
+ for s, t := range methodMap {
+ if method == t {
+ return s
+ }
+ }
+ return ""
+}
+
+type nodes []*node
+
+// Sort the list of nodes by label
+func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
+func (ns nodes) Len() int { return len(ns) }
+func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
+
+// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
+// The list order determines the traversal order.
+func (ns nodes) tailSort() {
+ for i := len(ns) - 1; i >= 0; i-- {
+ if ns[i].typ > ntStatic && ns[i].tail == '/' {
+ ns.Swap(i, len(ns)-1)
+ return
+ }
+ }
+}
+
+func (ns nodes) findEdge(label byte) *node {
+ num := len(ns)
+ idx := 0
+ i, j := 0, num-1
+ for i <= j {
+ idx = i + (j-i)/2
+ if label > ns[idx].label {
+ i = idx + 1
+ } else if label < ns[idx].label {
+ j = idx - 1
+ } else {
+ i = num // breaks cond
+ }
+ }
+ if ns[idx].label != label {
+ return nil
+ }
+ return ns[idx]
+}
+
+// Route describes the details of a routing handler.
+// The Handlers map key is an HTTP method.
+type Route struct {
+ SubRoutes Routes
+ Handlers map[string]http.Handler
+ Pattern string
+}
+
+// WalkFunc is the type of the function called for each method and route visited by Walk.
+type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
+
+// Walk walks any router tree that implements Routes interface.
+func Walk(r Routes, walkFn WalkFunc) error {
+ return walk(r, walkFn, "")
+}
+
+func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
+ for _, route := range r.Routes() {
+ mws := make([]func(http.Handler) http.Handler, len(parentMw))
+ copy(mws, parentMw)
+ mws = append(mws, r.Middlewares()...)
+
+ if route.SubRoutes != nil {
+ if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
+ return err
+ }
+ continue
+ }
+
+ for method, handler := range route.Handlers {
+ if method == "*" {
+ // Ignore a "catchAll" method, since we pass down all the specific methods for each route.
+ continue
+ }
+
+ fullRoute := parentRoute + route.Pattern
+ fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
+
+ if chain, ok := handler.(*ChainHandler); ok {
+ if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
+ return err
+ }
+ } else {
+ if err := walkFn(method, fullRoute, handler, mws...); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
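
A short sketch of Walk traversing the routing tree built above and printing each method/route pair; the routes themselves are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()
	r.Get("/", func(w http.ResponseWriter, req *http.Request) {})
	r.Route("/users", func(r chi.Router) {
		r.Get("/", func(w http.ResponseWriter, req *http.Request) {})
		r.Post("/", func(w http.ResponseWriter, req *http.Request) {})
		r.Get("/{id}", func(w http.ResponseWriter, req *http.Request) {})
	})

	// Walk visits every method/route pair in the tree, including subrouters.
	walkFn := func(method, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error {
		fmt.Printf("%-6s %s (%d middlewares)\n", method, route, len(middlewares))
		return nil
	}
	if err := chi.Walk(r, walkFn); err != nil {
		fmt.Println("walk error:", err)
	}
}
```
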
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 0000000..94ff801
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,29 @@
+run:
+ timeout: 1m
+ tests: true
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - deadcode
+ - errcheck
+ - forcetypeassert
+ - gocritic
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+
+issues:
+ exclude-use-default: false
+ max-issues-per-linter: 0
+ max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 0000000..c356960
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release. Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 0000000..5d37e29
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project. Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility. Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible. Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 0000000..ab59311
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,282 @@
+# A minimal logging API for Go
+
+[Go Reference](https://pkg.go.dev/github.com/go-logr/logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation. This is not
+an implementation of logging - it is an API. In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors. It provides
+a relatively small API which can be used everywhere you want to emit logs. It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers. It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless. Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong. In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use. Something like:
+
+```
+ func main() {
+ // ... other setup code ...
+
+ // Create the "root" logger. We have chosen the "logimpl" implementation,
+ // which takes some initial parameters and returns a logr.Logger.
+ logger := logimpl.New(param1, param2)
+
+ // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc. The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed. For example:
+
+```
+ app := createTheAppObject(logger)
+ app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation. They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+ type appObject struct {
+ // ... other fields ...
+ logger logr.Logger
+ // ... other fields ...
+ }
+
+ func (app *appObject) Run() {
+ app.logger.Info("starting up", "timestamp", time.Now())
+
+ // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed. Alas, here we are.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug." Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
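+For example (a minimal sketch; `logger` is assumed to be a `logr.Logger`
+obtained from some implementation, and `name`, `n` and `err` are values from
+the surrounding code):
+
+```
+    // An info log the user normally wants to see.
+    logger.Info("reconcile finished", "object", name)
+
+    // A more verbose info log, only emitted at higher verbosity settings.
+    logger.V(2).Info("cache refreshed", "entries", n)
+
+    // An error that is logged here rather than returned to the caller.
+    logger.Error(err, "failed to update status", "object", name)
+```
+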
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc., instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+ objects.) Structured logs allow you to preserve that structure when
+ outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+ a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T", thing))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs.)
+
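+As a sketch, one way to encode such a convention within a package (the
+constant names below are illustrative, not part of logr; `logger` and
+`attempt` come from the surrounding code):
+
+```
+    const (
+        levelAlways = 0 // always-on operational messages
+        levelDetail = 1 // chattier info you might possibly turn off
+        levelDebug  = 4 // debug-style logs
+        levelTrace  = 8 // very verbose tracing
+    )
+
+    logger.V(levelDebug).Info("retrying request", "attempt", attempt)
+```
+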
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+ [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+ more complex ones. Kubernetes is one example of a project that has
+ [adopted that
+ convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 0000000..9d92a38
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it. It can be
+// used whenever the caller is not interested in the logs. Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+ return Logger{
+ level: 0,
+ sink: discardLogSink{},
+ }
+}
+
+// discardLogSink is a LogSink that discards all messages.
+type discardLogSink struct{}
+
+// Verify that it actually implements the interface
+var _ LogSink = discardLogSink{}
+
+func (l discardLogSink) Init(RuntimeInfo) {
+}
+
+func (l discardLogSink) Enabled(int) bool {
+ return false
+}
+
+func (l discardLogSink) Info(int, string, ...interface{}) {
+}
+
+func (l discardLogSink) Error(error, string, ...interface{}) {
+}
+
+func (l discardLogSink) WithValues(...interface{}) LogSink {
+ return l
+}
+
+func (l discardLogSink) WithName(string) LogSink {
+ return l
+}
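+
+// A minimal usage sketch (hypothetical caller code that imports this package):
+//
+//	log := logr.Discard()
+//	log.Info("this message is dropped", "key", "value")
+//	log.Error(nil, "this one is dropped as well")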
diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod
new file mode 100644
index 0000000..7baec9b
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-logr/logr
+
+go 1.16
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 0000000..c3b56b3
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,510 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+// log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+// logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
+//
+// We can write:
+// logger.V(2).Info("an unusual thing happened")
+//
+// Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+// logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// Calling methods with the null logger (Logger{}) as instance will crash
+// because it has no LogSink. Therefore this null logger should never be passed
+// around. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
+// Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// * be human-readable and meaningful (not auto-generated or simple ordinals)
+// * be constant (not dependent on input data)
+// * contain only printable characters
+// * not contain whitespace or punctuation
+// * use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+// * "caller": the calling information (file/line) of a particular log line
+// * "error": the underlying error value in the `Error` method
+// * "level": the log level
+// * "logger": the name of the associated logger
+// * "msg": the log message
+// * "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// * "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// and more of a way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
+//
+// Logger grants access to the sink to enable type assertions like this:
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+import (
+ "context"
+)
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users.
+func New(sink LogSink) Logger {
+ logger := Logger{}
+ logger.setSink(sink)
+ sink.Init(runtimeInfo)
+ return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+ l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+ return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+ l.setSink(sink)
+ return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+ sink LogSink
+ level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+ return l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+ if l.Enabled() {
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Info(l.level, msg, keysAndValues...)
+ }
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentations for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+ if level < 0 {
+ level = 0
+ }
+ l.level += level
+ return l
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+ l.setSink(l.sink.WithValues(keysAndValues...))
+ return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls to WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+ l.setSink(l.sink.WithName(name))
+ return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(depth))
+ }
+ return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+ var helper func()
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(1))
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ helper = withHelper.GetCallStackHelper()
+ } else {
+ helper = func() {}
+ }
+ return helper, l
+}
+
+// contextKey is how we find Loggers in a context.Context.
+type contextKey struct{}
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// RuntimeInfo holds information that the logr "core" library knows which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+ // CallDepth is the number of call frames the logr library adds between the
+ // end-user and the LogSink. LogSink implementations which choose to print
+ // the original logging site (e.g. file & line) should climb this many
+ // additional frames to find it.
+ CallDepth int
+}
+
+// runtimeInfo is a static global. It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+ CallDepth: 1,
+}
+
+// LogSink represents a logging implementation. End-users will generally not
+// interact with this type.
+type LogSink interface {
+ // Init receives optional information about the logr library for LogSink
+ // implementations that need it.
+ Init(info RuntimeInfo)
+
+ // Enabled tests whether this LogSink is enabled at the specified V-level.
+ // For example, commandline flags might be used to set the logging
+ // verbosity and disable some info logs.
+ Enabled(level int) bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ // The level argument is provided for optional logging. This method will
+ // only be called when Enabled(level) is true. See Logger.Info for more
+ // details.
+ Info(level int, msg string, keysAndValues ...interface{})
+
+ // Error logs an error, with the given message and key/value pairs as
+ // context. See Logger.Error for more details.
+ Error(err error, msg string, keysAndValues ...interface{})
+
+ // WithValues returns a new LogSink with additional key/value pairs. See
+ // Logger.WithValues for more details.
+ WithValues(keysAndValues ...interface{}) LogSink
+
+ // WithName returns a new LogSink with the specified name appended. See
+ // Logger.WithName for more details.
+ WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a Logger that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+ // WithCallDepth returns a LogSink that will offset the call
+ // stack by the specified number of frames when logging call
+ // site information.
+ //
+ // If depth is 0, the LogSink should skip exactly the number
+ // of call frames defined in RuntimeInfo.CallDepth when Info
+ // or Error are called, i.e. the attribution should be to the
+ // direct caller of Logger.Info or Logger.Error.
+ //
+ // If depth is 1 the attribution should skip 1 call frame, and so on.
+ // Successive calls to this are additive.
+ WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a Logger that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+ // GetCallStackHelper returns a function that must be called
+ // to mark the direct caller as helper function when logging
+ // call site information.
+ GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+ // MarshalLog can be used to:
+ // - ensure that structs are not logged as strings when the original
+ // value has a String method: return a different type without a
+ // String method
+ // - select which fields of a complex type should get logged:
+ // return a simpler struct with fewer fields
+ // - log unexported fields: return a different struct
+ // with exported fields
+ //
+ // It may return any value of any type.
+ MarshalLog() interface{}
+}
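+
+// A minimal end-to-end sketch (hypothetical caller code; Discard stands in
+// for a real LogSink-backed implementation such as stdr or zapr):
+//
+//	logger := logr.Discard().WithName("worker").WithValues("job", 42)
+//	ctx := logr.NewContext(context.Background(), logger)
+//
+//	// ...later, possibly in another package that only sees the context...
+//	log := logr.FromContextOrDiscard(ctx)
+//	log.V(1).Info("processing item", "id", 7)
+//	log.Error(nil, "processing failed", "id", 7)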
diff --git a/vendor/github.com/go-macaron/inject/.travis.yml b/vendor/github.com/go-macaron/inject/.travis.yml
new file mode 100644
index 0000000..2774fb3
--- /dev/null
+++ b/vendor/github.com/go-macaron/inject/.travis.yml
@@ -0,0 +1,14 @@
+sudo: false
+language: go
+
+go:
+ - 1.3
+ - 1.4
+ - 1.5
+ - tip
+
+script: go test -v -cover -race
+
+notifications:
+ email:
+ - u@gogs.io
diff --git a/vendor/github.com/go-macaron/inject/LICENSE b/vendor/github.com/go-macaron/inject/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/go-macaron/inject/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-macaron/inject/README.md b/vendor/github.com/go-macaron/inject/README.md
new file mode 100644
index 0000000..c65c769
--- /dev/null
+++ b/vendor/github.com/go-macaron/inject/README.md
@@ -0,0 +1,11 @@
+# inject [Build Status](https://travis-ci.org/go-macaron/inject) [Coverage](http://gocover.io/github.com/go-macaron/inject)
+
+Package inject provides utilities for mapping and injecting dependencies in various ways.
+
+**This is a modified version of [codegangsta/inject](https://github.com/codegangsta/inject), maintained for the specific needs of Macaron.**
+
+**Please use the original version if you need a general-purpose dependency injection library.**
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
\ No newline at end of file
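
For reviewers unfamiliar with this dependency: a minimal, hypothetical sketch of how the vendored injector below is typically used. The mapped values and the handler function are illustrative only, not part of this change.

```go
package main

import (
	"fmt"

	"github.com/go-macaron/inject"
)

func main() {
	inj := inject.New()

	// Map concrete values by their dynamic type.
	inj.Map("a string dependency")
	inj.Map(42)

	// Invoke resolves each function argument from the type map via reflection.
	ret, err := inj.Invoke(func(s string, n int) string {
		return fmt.Sprintf("%s / %d", s, n)
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ret[0].String()) // "a string dependency / 42"
}
```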
diff --git a/vendor/github.com/go-macaron/inject/inject.go b/vendor/github.com/go-macaron/inject/inject.go
new file mode 100644
index 0000000..1c1f98e
--- /dev/null
+++ b/vendor/github.com/go-macaron/inject/inject.go
@@ -0,0 +1,262 @@
+// Copyright 2013 Jeremy Saenz
+// Copyright 2015 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package inject provides utilities for mapping and injecting dependencies in various ways.
+package inject
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Injector represents an interface for mapping and injecting dependencies into structs
+// and function arguments.
+type Injector interface {
+ Applicator
+ Invoker
+ TypeMapper
+ // SetParent sets the parent of the injector. If the injector cannot find a
+ // dependency in its Type map it will check its parent before returning an
+ // error.
+ SetParent(Injector)
+}
+
+// Applicator represents an interface for mapping dependencies to a struct.
+type Applicator interface {
+ // Maps dependencies in the Type map to each field in the struct
+ // that is tagged with 'inject'. Returns an error if the injection
+ // fails.
+ Apply(interface{}) error
+}
+
+// Invoker represents an interface for calling functions via reflection.
+type Invoker interface {
+ // Invoke attempts to call the interface{} provided as a function,
+ // providing dependencies for function arguments based on Type. Returns
+ // a slice of reflect.Value representing the returned values of the function.
+ // Returns an error if the injection fails.
+ Invoke(interface{}) ([]reflect.Value, error)
+}
+
+// FastInvoker represents an interface that lets a handler be called directly instead of through reflection.
+//
+// example:
+// type handlerFuncHandler func(http.ResponseWriter, *http.Request) error
+// func (f handlerFuncHandler)Invoke([]interface{}) ([]reflect.Value, error){
+// ret := f(p[0].(http.ResponseWriter), p[1].(*http.Request))
+// return []reflect.Value{reflect.ValueOf(ret)}, nil
+// }
+//
+// type funcHandler func(int, string)
+// func (f funcHandler)Invoke([]interface{}) ([]reflect.Value, error){
+// f(p[0].(int), p[1].(string))
+// return nil, nil
+// }
+type FastInvoker interface {
+ // Invoke attempts to call the ordinary functions. If f is a function
+ // with the appropriate signature, f.Invoke([]interface{}) is a Call that calls f.
+ // Returns a slice of reflect.Value representing the returned values of the function.
+ // Returns an error if the injection fails.
+ Invoke([]interface{}) ([]reflect.Value, error)
+}
+
+// IsFastInvoker reports whether h implements FastInvoker.
+func IsFastInvoker(h interface{}) bool {
+ _, ok := h.(FastInvoker)
+ return ok
+}
+
+// TypeMapper represents an interface for mapping interface{} values based on type.
+type TypeMapper interface {
+ // Maps the interface{} value based on its immediate type from reflect.TypeOf.
+ Map(interface{}) TypeMapper
+ // Maps the interface{} value based on the pointer of an Interface provided.
+ // This is really only useful for mapping a value as an interface, as interfaces
+ // cannot at this time be referenced directly without a pointer.
+ MapTo(interface{}, interface{}) TypeMapper
+ // Provides a possibility to directly insert a mapping based on type and value.
+ // This makes it possible to directly map type arguments not possible to instantiate
+ // with reflect like unidirectional channels.
+ Set(reflect.Type, reflect.Value) TypeMapper
+ // Returns the Value that is mapped to the current type. Returns a zeroed Value if
+ // the Type has not been mapped.
+ GetVal(reflect.Type) reflect.Value
+}
+
+type injector struct {
+ values map[reflect.Type]reflect.Value
+ parent Injector
+}
+
+// InterfaceOf dereferences a pointer to an Interface type.
+// It panics if value is not a pointer to an interface.
+func InterfaceOf(value interface{}) reflect.Type {
+ t := reflect.TypeOf(value)
+
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Interface {
+ panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
+ }
+
+ return t
+}
+
+// New returns a new Injector.
+func New() Injector {
+ return &injector{
+ values: make(map[reflect.Type]reflect.Value),
+ }
+}
+
+// Invoke attempts to call the interface{} provided as a function,
+// providing dependencies for function arguments based on Type.
+// Returns a slice of reflect.Value representing the returned values of the function.
+// Returns an error if the injection fails.
+// It panics if f is not a function.
+func (inj *injector) Invoke(f interface{}) ([]reflect.Value, error) {
+ t := reflect.TypeOf(f)
+ switch v := f.(type) {
+ case FastInvoker:
+ return inj.fastInvoke(v, t, t.NumIn())
+ default:
+ return inj.callInvoke(f, t, t.NumIn())
+ }
+}
+
+func (inj *injector) fastInvoke(f FastInvoker, t reflect.Type, numIn int) ([]reflect.Value, error) {
+ var in []interface{}
+ if numIn > 0 {
+ in = make([]interface{}, numIn) // Panic if t is not kind of Func
+ var argType reflect.Type
+ var val reflect.Value
+ for i := 0; i < numIn; i++ {
+ argType = t.In(i)
+ val = inj.GetVal(argType)
+ if !val.IsValid() {
+ return nil, fmt.Errorf("Value not found for type %v", argType)
+ }
+
+ in[i] = val.Interface()
+ }
+ }
+ return f.Invoke(in)
+}
+
+// callInvoke invokes f through reflect.Value.Call.
+func (inj *injector) callInvoke(f interface{}, t reflect.Type, numIn int) ([]reflect.Value, error) {
+ var in []reflect.Value
+ if numIn > 0 {
+ in = make([]reflect.Value, numIn)
+ var argType reflect.Type
+ var val reflect.Value
+ for i := 0; i < numIn; i++ {
+ argType = t.In(i)
+ val = inj.GetVal(argType)
+ if !val.IsValid() {
+ return nil, fmt.Errorf("Value not found for type %v", argType)
+ }
+
+ in[i] = val
+ }
+ }
+ return reflect.ValueOf(f).Call(in), nil
+}
+
+// Maps dependencies in the Type map to each field in the struct
+// that is tagged with 'inject'.
+// Returns an error if the injection fails.
+func (inj *injector) Apply(val interface{}) error {
+ v := reflect.ValueOf(val)
+
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ if v.Kind() != reflect.Struct {
+ return nil // Should not panic here ?
+ }
+
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ structField := t.Field(i)
+ if f.CanSet() && (structField.Tag == "inject" || structField.Tag.Get("inject") != "") {
+ ft := f.Type()
+ v := inj.GetVal(ft)
+ if !v.IsValid() {
+ return fmt.Errorf("Value not found for type %v", ft)
+ }
+
+ f.Set(v)
+ }
+
+ }
+
+ return nil
+}
+
+// Map maps the concrete value of val to its dynamic type using reflect.TypeOf.
+// It returns the TypeMapper the mapping has been registered in.
+func (i *injector) Map(val interface{}) TypeMapper {
+ i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
+ return i
+}
+
+func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
+ i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
+ return i
+}
+
+// Maps the given reflect.Type to the given reflect.Value and returns
+// the TypeMapper the mapping has been registered in.
+func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
+ i.values[typ] = val
+ return i
+}
+
+func (i *injector) GetVal(t reflect.Type) reflect.Value {
+ val := i.values[t]
+
+ if val.IsValid() {
+ return val
+ }
+
+ // no concrete types found, try to find implementors
+ // if t is an interface
+ if t.Kind() == reflect.Interface {
+ for k, v := range i.values {
+ if k.Implements(t) {
+ val = v
+ break
+ }
+ }
+ }
+
+ // Still no type found, try to look it up on the parent
+ if !val.IsValid() && i.parent != nil {
+ val = i.parent.GetVal(t)
+ }
+
+ return val
+
+}
+
+func (i *injector) SetParent(parent Injector) {
+ i.parent = parent
+}
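
The FastInvoker interface above exists so a handler type can skip reflect.Value.Call. The sketch below shows one way a caller might opt in; the greeterFunc type and its wiring are assumptions for illustration, not code introduced by this diff.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/go-macaron/inject"
)

// greeterFunc implements inject.FastInvoker so the injector can call it
// without going through reflect.Value.Call.
type greeterFunc func(name string) string

func (f greeterFunc) Invoke(args []interface{}) ([]reflect.Value, error) {
	ret := f(args[0].(string))
	return []reflect.Value{reflect.ValueOf(ret)}, nil
}

func main() {
	inj := inject.New()
	inj.Map("world")

	// Because greeterFunc satisfies FastInvoker, Invoke takes the fast path.
	ret, err := inj.Invoke(greeterFunc(func(name string) string {
		return "hello " + name
	}))
	if err != nil {
		panic(err)
	}
	fmt.Println(ret[0].String()) // "hello world"
}
```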
diff --git a/vendor/github.com/go-macaron/session/.gitignore b/vendor/github.com/go-macaron/session/.gitignore
new file mode 100644
index 0000000..9297dbc
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/.gitignore
@@ -0,0 +1,2 @@
+ledis/tmp.db
+nodb/tmp.db
\ No newline at end of file
diff --git a/vendor/github.com/go-macaron/session/.travis.yml b/vendor/github.com/go-macaron/session/.travis.yml
new file mode 100644
index 0000000..499a0e9
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+language: go
+go:
+ # - 1.6.x: bad for Redis
+ # - 1.7.x: bad for MySQL
+ # - 1.8.x: bad for MySQL
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+
+script: go test -v -cover -race
diff --git a/vendor/github.com/go-macaron/session/LICENSE b/vendor/github.com/go-macaron/session/LICENSE
new file mode 100644
index 0000000..8405e89
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/go-macaron/session/README.md b/vendor/github.com/go-macaron/session/README.md
new file mode 100644
index 0000000..9d878e7
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/README.md
@@ -0,0 +1,22 @@
+# session [Build Status](https://travis-ci.org/go-macaron/session)
+
+Middleware session provides session management for [Macaron](https://github.com/go-macaron/macaron). It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase, Ledis and Nodb.
+
+## Installation
+
+The minimum requirement of Go is 1.6 (*1.7 if using Redis, 1.9 if using MySQL*).
+
+ go get github.com/go-macaron/session
+
+## Getting Help
+
+- [API Reference](https://gowalker.org/github.com/go-macaron/session)
+- [Documentation](https://go-macaron.com/docs/middlewares/session)
+
+## Credits
+
+This package is a modified version of [beego/session](https://github.com/astaxie/beego/tree/master/session).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.
\ No newline at end of file
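
As a usage sketch (assuming a plain Macaron app; the route and keys are illustrative only), the Sessioner middleware vendored below injects a session.Store into handlers:

```go
package main

import (
	"github.com/go-macaron/session"
	"gopkg.in/macaron.v1"
)

func main() {
	m := macaron.Classic()
	// Sessioner maps a session.Store into the handler chain;
	// with no Options it falls back to the in-memory provider.
	m.Use(session.Sessioner())

	m.Get("/", func(sess session.Store) string {
		// Values round-trip through the store under arbitrary interface{} keys.
		if err := sess.Set("visited", true); err != nil {
			return err.Error()
		}
		_ = sess.Get("visited")
		return "ok"
	})

	m.Run()
}
```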
diff --git a/vendor/github.com/go-macaron/session/file.go b/vendor/github.com/go-macaron/session/file.go
new file mode 100644
index 0000000..4c686f7
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/file.go
@@ -0,0 +1,266 @@
+// Copyright 2013 Beego Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package session
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/unknwon/com"
+)
+
+// FileStore represents a file session store implementation.
+type FileStore struct {
+ p *FileProvider
+ sid string
+ lock sync.RWMutex
+ data map[interface{}]interface{}
+}
+
+// NewFileStore creates and returns a file session store.
+func NewFileStore(p *FileProvider, sid string, kv map[interface{}]interface{}) *FileStore {
+ return &FileStore{
+ p: p,
+ sid: sid,
+ data: kv,
+ }
+}
+
+// Set sets value to given key in session.
+func (s *FileStore) Set(key, val interface{}) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.data[key] = val
+ return nil
+}
+
+// Get gets value by given key in session.
+func (s *FileStore) Get(key interface{}) interface{} {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ return s.data[key]
+}
+
+// Delete deletes a key from session.
+func (s *FileStore) Delete(key interface{}) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ delete(s.data, key)
+ return nil
+}
+
+// ID returns current session ID.
+func (s *FileStore) ID() string {
+ return s.sid
+}
+
+// Release releases resources and saves data to the provider.
+func (s *FileStore) Release() error {
+ s.p.lock.Lock()
+ defer s.p.lock.Unlock()
+
+ // Skip encoding if the data is empty
+ if len(s.data) == 0 {
+ return nil
+ }
+
+ data, err := EncodeGob(s.data)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(s.p.filepath(s.sid), data, 0600)
+}
+
+// Flush deletes all session data.
+func (s *FileStore) Flush() error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.data = make(map[interface{}]interface{})
+ return nil
+}
+
+// FileProvider represents a file session provider implementation.
+type FileProvider struct {
+ lock sync.RWMutex
+ maxlifetime int64
+ rootPath string
+}
+
+// Init initializes file session provider with given root path.
+func (p *FileProvider) Init(maxlifetime int64, rootPath string) error {
+ p.lock.Lock()
+ p.maxlifetime = maxlifetime
+ p.rootPath = rootPath
+ p.lock.Unlock()
+ return nil
+}
+
+func (p *FileProvider) filepath(sid string) string {
+ return path.Join(p.rootPath, string(sid[0]), string(sid[1]), sid)
+}
+
+// Read returns raw session store by session ID.
+func (p *FileProvider) Read(sid string) (_ RawStore, err error) {
+ filename := p.filepath(sid)
+ if err = os.MkdirAll(path.Dir(filename), 0700); err != nil {
+ return nil, err
+ }
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ var f *os.File
+ if com.IsFile(filename) {
+ f, err = os.OpenFile(filename, os.O_RDONLY, 0600)
+ } else {
+ f, err = os.Create(filename)
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ if err = os.Chtimes(filename, time.Now(), time.Now()); err != nil {
+ return nil, err
+ }
+
+ var kv map[interface{}]interface{}
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+ if len(data) == 0 {
+ kv = make(map[interface{}]interface{})
+ } else {
+ kv, err = DecodeGob(data)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return NewFileStore(p, sid, kv), nil
+}
+
+// Exist returns true if session with given ID exists.
+func (p *FileProvider) Exist(sid string) bool {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+ return com.IsFile(p.filepath(sid))
+}
+
+// Destory deletes a session by session ID.
+func (p *FileProvider) Destory(sid string) error {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return os.Remove(p.filepath(sid))
+}
+
+func (p *FileProvider) regenerate(oldsid, sid string) (err error) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ filename := p.filepath(sid)
+ if com.IsExist(filename) {
+ return fmt.Errorf("new sid '%s' already exists", sid)
+ }
+
+ oldname := p.filepath(oldsid)
+ if !com.IsFile(oldname) {
+ data, err := EncodeGob(make(map[interface{}]interface{}))
+ if err != nil {
+ return err
+ }
+ if err = os.MkdirAll(path.Dir(oldname), 0700); err != nil {
+ return err
+ }
+ if err = ioutil.WriteFile(oldname, data, 0600); err != nil {
+ return err
+ }
+ }
+
+ if err = os.MkdirAll(path.Dir(filename), 0700); err != nil {
+ return err
+ }
+ if err = os.Rename(oldname, filename); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Regenerate regenerates a session store from old session ID to new one.
+func (p *FileProvider) Regenerate(oldsid, sid string) (_ RawStore, err error) {
+ if err := p.regenerate(oldsid, sid); err != nil {
+ return nil, err
+ }
+
+ return p.Read(sid)
+}
+
+// Count counts and returns number of sessions.
+func (p *FileProvider) Count() int {
+ count := 0
+ if err := filepath.Walk(p.rootPath, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !fi.IsDir() {
+ count++
+ }
+ return nil
+ }); err != nil {
+ log.Printf("error counting session files: %v", err)
+ return 0
+ }
+ return count
+}
+
+// GC removes session files whose last modification time has exceeded the max lifetime.
+func (p *FileProvider) GC() {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ if !com.IsExist(p.rootPath) {
+ return
+ }
+
+ if err := filepath.Walk(p.rootPath, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if !fi.IsDir() &&
+ (fi.ModTime().Unix()+p.maxlifetime) < time.Now().Unix() {
+ return os.Remove(path)
+ }
+ return nil
+ }); err != nil {
+ log.Printf("error garbage collecting session files: %v", err)
+ }
+}
+
+func init() {
+ Register("file", &FileProvider{})
+}
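
The file provider registered above persists each session as a gob-encoded file under a root directory, sharded by the first two characters of the session ID. A hedged configuration sketch follows; the path and lifetime are illustrative.

```go
package main

import (
	"github.com/go-macaron/session"
	"gopkg.in/macaron.v1"
)

func main() {
	m := macaron.Classic()
	// Select the "file" provider; ProviderConfig is the root directory
	// under which FileProvider shards session files by session ID.
	m.Use(session.Sessioner(session.Options{
		Provider:       "file",
		ProviderConfig: "data/sessions", // illustrative path
		Gclifetime:     3600,            // GC interval in seconds
	}))
	m.Run()
}
```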
diff --git a/vendor/github.com/go-macaron/session/flash.go b/vendor/github.com/go-macaron/session/flash.go
new file mode 100644
index 0000000..99aae71
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/flash.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package session
+
+import (
+ "net/url"
+
+ "gopkg.in/macaron.v1"
+)
+
+type Flash struct {
+ ctx *macaron.Context
+ url.Values
+ ErrorMsg, WarningMsg, InfoMsg, SuccessMsg string
+}
+
+func (f *Flash) set(name, msg string, current ...bool) {
+ isShow := false
+ if (len(current) == 0 && macaron.FlashNow) ||
+ (len(current) > 0 && current[0]) {
+ isShow = true
+ }
+
+ if isShow {
+ f.ctx.Data["Flash"] = f
+ } else {
+ f.Set(name, msg)
+ }
+}
+
+func (f *Flash) Error(msg string, current ...bool) {
+ f.ErrorMsg = msg
+ f.set("error", msg, current...)
+}
+
+func (f *Flash) Warning(msg string, current ...bool) {
+ f.WarningMsg = msg
+ f.set("warning", msg, current...)
+}
+
+func (f *Flash) Info(msg string, current ...bool) {
+ f.InfoMsg = msg
+ f.set("info", msg, current...)
+}
+
+func (f *Flash) Success(msg string, current ...bool) {
+ f.SuccessMsg = msg
+ f.set("success", msg, current...)
+}
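
The Sessioner middleware (session.go, later in this diff) maps a *Flash into each request, so handlers can queue one-shot messages that are carried to the next request via the macaron_flash cookie. A minimal sketch; the route and message are illustrative only.

```go
package main

import (
	"github.com/go-macaron/session"
	"gopkg.in/macaron.v1"
)

func main() {
	m := macaron.Classic()
	m.Use(session.Sessioner())

	m.Post("/save", func(f *session.Flash) string {
		// Queued into the "macaron_flash" cookie and surfaced as
		// ctx.Data["Flash"] on the next request, unless the
		// current-request variant is requested explicitly.
		f.Success("settings saved")
		return "ok"
	})
	m.Run()
}
```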
diff --git a/vendor/github.com/go-macaron/session/go.mod b/vendor/github.com/go-macaron/session/go.mod
new file mode 100644
index 0000000..ea5b8d6
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/go.mod
@@ -0,0 +1,38 @@
+module github.com/go-macaron/session
+
+go 1.12
+
+require (
+ github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668
+ github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d // indirect
+ github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b // indirect
+ github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7
+ github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
+ github.com/edsrzf/mmap-go v1.0.0 // indirect
+ github.com/go-sql-driver/mysql v1.4.1
+ github.com/golang/protobuf v1.3.2 // indirect
+ github.com/golang/snappy v0.0.1 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/lib/pq v1.2.0
+ github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect
+ github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af
+ github.com/mattn/go-sqlite3 v1.11.0 // indirect
+ github.com/onsi/ginkgo v1.8.0 // indirect
+ github.com/onsi/gomega v1.5.0 // indirect
+ github.com/pelletier/go-toml v1.4.0 // indirect
+ github.com/pkg/errors v0.8.1 // indirect
+ github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
+ github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d // indirect
+ github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92
+ github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect
+ github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337
+ github.com/stretchr/testify v1.3.0 // indirect
+ github.com/syndtr/goleveldb v1.0.0 // indirect
+ github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e
+ google.golang.org/appengine v1.6.1 // indirect
+ gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e // indirect
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+ gopkg.in/ini.v1 v1.46.0
+ gopkg.in/macaron.v1 v1.3.4
+ gopkg.in/redis.v2 v2.3.2
+)
diff --git a/vendor/github.com/go-macaron/session/go.sum b/vendor/github.com/go-macaron/session/go.sum
new file mode 100644
index 0000000..ee2ae96
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/go.sum
@@ -0,0 +1,137 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
+github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
+github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM=
+github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
+github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo=
+github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
+github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=
+github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc=
+github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
+github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191 h1:NjHlg70DuOkcAMqgt0+XA+NHwtu66MkTVVgR4fFWbcI=
+github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191/go.mod h1:VFI2o2q9kYsC4o7VP1HrEVosiZZTd+MVT3YZx4gqvJw=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de h1:nyxwRdWHAVxpFcDThedEgQ07DbcRc5xgNObtbTp76fk=
+github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de/go.mod h1:3q8WtuPQsoRbatJuy3nvq/hRSvuBJrHHr+ybPPiNvHQ=
+github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af h1:UaWHNBdukWrSG3DRvHFR/hyfg681fceqQDYVTBncKfQ=
+github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VObJ7peltM+2n3PWOz7yTrfUuGbVFkzN0=
+github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
+github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
+github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68=
+github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d/go.mod h1:vq0tzqLRu6TS7Id0wMo2N5QzJoKedVeovOpHjnykSzY=
+github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92 h1:qvsJwGToa8rxb42cDRhkbKeX2H5N8BH+s2aUikGt8mI=
+github.com/siddontang/ledisdb v0.0.0-20190202134119-8ceb77e66a92/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
+github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=
+github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
+github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
+github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
+github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
+github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM=
+github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
+golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190802220118-1d1727260058/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e h1:wGA78yza6bu/mWcc4QfBuIEHEtc06xdiU0X8sY36yUU=
+gopkg.in/bufio.v1 v1.0.0-20140618132640-567b2bfa514e/go.mod h1:xsQCaysVCudhrYTfzYWe577fCe7Ceci+6qjO2Rdc0Z4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag=
+gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/macaron.v1 v1.3.4 h1:HvIscOwxhFhx3swWM/979wh2QMYyuXrNmrF9l+j3HZs=
+gopkg.in/macaron.v1 v1.3.4/go.mod h1:/RoHTdC8ALpyJ3+QR36mKjwnT1F1dyYtsGM9Ate6ZFI=
+gopkg.in/redis.v2 v2.3.2 h1:GPVIIB/JnL1wvfULefy3qXmPu1nfNu2d0yA09FHgwfs=
+gopkg.in/redis.v2 v2.3.2/go.mod h1:4wl9PJ/CqzeHk3LVq1hNLHH8krm3+AXEgut4jVc++LU=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-macaron/session/memory.go b/vendor/github.com/go-macaron/session/memory.go
new file mode 100644
index 0000000..4ad9293
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/memory.go
@@ -0,0 +1,217 @@
+// Copyright 2013 Beego Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package session
+
+import (
+ "container/list"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// MemStore represents an in-memory session store implementation.
+type MemStore struct {
+ sid string
+ lock sync.RWMutex
+ data map[interface{}]interface{}
+ lastAccess time.Time
+}
+
+// NewMemStore creates and returns a memory session store.
+func NewMemStore(sid string) *MemStore {
+ return &MemStore{
+ sid: sid,
+ data: make(map[interface{}]interface{}),
+ lastAccess: time.Now(),
+ }
+}
+
+// Set sets value to given key in session.
+func (s *MemStore) Set(key, val interface{}) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.data[key] = val
+ return nil
+}
+
+// Get gets value by given key in session.
+func (s *MemStore) Get(key interface{}) interface{} {
+ s.lock.RLock()
+ defer s.lock.RUnlock()
+
+ return s.data[key]
+}
+
+// Delete deletes a key from session.
+func (s *MemStore) Delete(key interface{}) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ delete(s.data, key)
+ return nil
+}
+
+// ID returns current session ID.
+func (s *MemStore) ID() string {
+ return s.sid
+}
+
+// Release releases resources and saves data to the provider.
+func (_ *MemStore) Release() error {
+ return nil
+}
+
+// Flush deletes all session data.
+func (s *MemStore) Flush() error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.data = make(map[interface{}]interface{})
+ return nil
+}
+
+// MemProvider represents an in-memory session provider implementation.
+type MemProvider struct {
+ lock sync.RWMutex
+ maxLifetime int64
+ data map[string]*list.Element
+// A priority list in which entries with a more recent lastAccess get higher priority.
+ list *list.List
+}
+
+// Init initializes memory session provider.
+func (p *MemProvider) Init(maxLifetime int64, _ string) error {
+ p.lock.Lock()
+ p.maxLifetime = maxLifetime
+ p.lock.Unlock()
+ return nil
+}
+
+// update refreshes the last access time of the session store with the given ID.
+func (p *MemProvider) update(sid string) error {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if e, ok := p.data[sid]; ok {
+ e.Value.(*MemStore).lastAccess = time.Now()
+ p.list.MoveToFront(e)
+ return nil
+ }
+ return nil
+}
+
+// Read returns raw session store by session ID.
+func (p *MemProvider) Read(sid string) (_ RawStore, err error) {
+ p.lock.RLock()
+ e, ok := p.data[sid]
+ p.lock.RUnlock()
+
+ if ok {
+ if err = p.update(sid); err != nil {
+ return nil, err
+ }
+ return e.Value.(*MemStore), nil
+ }
+
+ // Create a new session.
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ s := NewMemStore(sid)
+ p.data[sid] = p.list.PushBack(s)
+ return s, nil
+}
+
+// Exist returns true if session with given ID exists.
+func (p *MemProvider) Exist(sid string) bool {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ _, ok := p.data[sid]
+ return ok
+}
+
+// Destory deletes a session by session ID.
+func (p *MemProvider) Destory(sid string) error {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ e, ok := p.data[sid]
+ if !ok {
+ return nil
+ }
+
+ p.list.Remove(e)
+ delete(p.data, sid)
+ return nil
+}
+
+// Regenerate regenerates a session store from old session ID to new one.
+func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) {
+ if p.Exist(sid) {
+ return nil, fmt.Errorf("new sid '%s' already exists", sid)
+ }
+
+ s, err := p.Read(oldsid)
+ if err != nil {
+ return nil, err
+ }
+
+ if err = p.Destory(oldsid); err != nil {
+ return nil, err
+ }
+
+ s.(*MemStore).sid = sid
+
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ p.data[sid] = p.list.PushBack(s)
+ return s, nil
+}
+
+// Count counts and returns number of sessions.
+func (p *MemProvider) Count() int {
+ return p.list.Len()
+}
+
+// GC removes in-memory sessions whose last access time has exceeded the max lifetime.
+func (p *MemProvider) GC() {
+ p.lock.RLock()
+ for {
+ // No session in the list.
+ e := p.list.Back()
+ if e == nil {
+ break
+ }
+
+ if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() {
+ p.lock.RUnlock()
+ p.lock.Lock()
+ p.list.Remove(e)
+ delete(p.data, e.Value.(*MemStore).sid)
+ p.lock.Unlock()
+ p.lock.RLock()
+ } else {
+ break
+ }
+ }
+ p.lock.RUnlock()
+}
+
+func init() {
+ Register("memory", &MemProvider{list: list.New(), data: make(map[string]*list.Element)})
+}
diff --git a/vendor/github.com/go-macaron/session/session.go b/vendor/github.com/go-macaron/session/session.go
new file mode 100644
index 0000000..97fa56e
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/session.go
@@ -0,0 +1,393 @@
+// Copyright 2013 Beego Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package session is a middleware that provides session management for Macaron.
+package session
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "gopkg.in/macaron.v1"
+)
+
+const _VERSION = "0.6.0"
+
+func Version() string {
+ return _VERSION
+}
+
+// RawStore is the interface that operates the session data.
+type RawStore interface {
+ // Set sets value to given key in session.
+ Set(interface{}, interface{}) error
+ // Get gets value by given key in session.
+ Get(interface{}) interface{}
+ // Delete deletes a key from session.
+ Delete(interface{}) error
+ // ID returns current session ID.
+ ID() string
+ // Release releases session resource and save data to provider.
+ Release() error
+ // Flush deletes all session data.
+ Flush() error
+}
+
+// Store is the interface that contains all data for one session process with specific ID.
+type Store interface {
+ RawStore
+ // Read returns raw session store by session ID.
+ Read(string) (RawStore, error)
+ // Destory deletes a session.
+ Destory(*macaron.Context) error
+ // RegenerateId regenerates a session store from old session ID to new one.
+ RegenerateId(*macaron.Context) (RawStore, error)
+ // Count counts and returns number of sessions.
+ Count() int
+ // GC calls GC to clean expired sessions.
+ GC()
+}
+
+type store struct {
+ RawStore
+ *Manager
+}
+
+var _ Store = &store{}
+
+// Options represents a struct for specifying configuration options for the session middleware.
+type Options struct {
+ // Name of provider. Default is "memory".
+ Provider string
+ // Provider configuration, it's corresponding to provider.
+ ProviderConfig string
+ // Cookie name to save session ID. Default is "MacaronSession".
+ CookieName string
+ // Cookie path to store. Default is "/".
+ CookiePath string
+ // GC interval time in seconds. Default is 3600.
+ Gclifetime int64
+ // Max life time in seconds. Default is whatever GC interval time is.
+ Maxlifetime int64
+ // Use HTTPS only. Default is false.
+ Secure bool
+ // Cookie life time. Default is 0.
+ CookieLifeTime int
+ // Cookie domain name. Default is empty.
+ Domain string
+ // Session ID length. Default is 16.
+ IDLength int
+ // Configuration section name. Default is "session".
+ Section string
+ // Ignore release for websocket. Default is false.
+ IgnoreReleaseForWebSocket bool
+}
+
+func prepareOptions(options []Options) Options {
+ var opt Options
+ if len(options) > 0 {
+ opt = options[0]
+ }
+ if len(opt.Section) == 0 {
+ opt.Section = "session"
+ }
+ sec := macaron.Config().Section(opt.Section)
+
+ if len(opt.Provider) == 0 {
+ opt.Provider = sec.Key("PROVIDER").MustString("memory")
+ }
+ if len(opt.ProviderConfig) == 0 {
+ opt.ProviderConfig = sec.Key("PROVIDER_CONFIG").MustString("data/sessions")
+ }
+ if len(opt.CookieName) == 0 {
+ opt.CookieName = sec.Key("COOKIE_NAME").MustString("MacaronSession")
+ }
+ if len(opt.CookiePath) == 0 {
+ opt.CookiePath = sec.Key("COOKIE_PATH").MustString("/")
+ }
+ if opt.Gclifetime == 0 {
+ opt.Gclifetime = sec.Key("GC_INTERVAL_TIME").MustInt64(3600)
+ }
+ if opt.Maxlifetime == 0 {
+ opt.Maxlifetime = sec.Key("MAX_LIFE_TIME").MustInt64(opt.Gclifetime)
+ }
+ if !opt.Secure {
+ opt.Secure = sec.Key("SECURE").MustBool()
+ }
+ if opt.CookieLifeTime == 0 {
+ opt.CookieLifeTime = sec.Key("COOKIE_LIFE_TIME").MustInt()
+ }
+ if len(opt.Domain) == 0 {
+ opt.Domain = sec.Key("DOMAIN").String()
+ }
+ if opt.IDLength == 0 {
+ opt.IDLength = sec.Key("ID_LENGTH").MustInt(16)
+ }
+ if !opt.IgnoreReleaseForWebSocket {
+ opt.IgnoreReleaseForWebSocket = sec.Key("IGNORE_RELEASE_FOR_WEBSOCKET").MustBool()
+ }
+
+ return opt
+}
+
+// Sessioner is a middleware that maps a session.Store service into the Macaron handler chain.
+// A single variadic session.Options struct can optionally be provided for configuration.
+func Sessioner(options ...Options) macaron.Handler {
+ opt := prepareOptions(options)
+ manager, err := NewManager(opt.Provider, opt)
+ if err != nil {
+ panic(err)
+ }
+ go manager.startGC()
+
+ return func(ctx *macaron.Context) {
+ sess, err := manager.Start(ctx)
+ if err != nil {
+ panic("session(start): " + err.Error())
+ }
+
+ // Get flash.
+ vals, _ := url.ParseQuery(ctx.GetCookie("macaron_flash"))
+ if len(vals) > 0 {
+ f := &Flash{Values: vals}
+ f.ErrorMsg = f.Get("error")
+ f.SuccessMsg = f.Get("success")
+ f.InfoMsg = f.Get("info")
+ f.WarningMsg = f.Get("warning")
+ ctx.Data["Flash"] = f
+ ctx.SetCookie("macaron_flash", "", -1, opt.CookiePath)
+ }
+
+ f := &Flash{ctx, url.Values{}, "", "", "", ""}
+ ctx.Resp.Before(func(macaron.ResponseWriter) {
+ if flash := f.Encode(); len(flash) > 0 {
+ ctx.SetCookie("macaron_flash", flash, 0, opt.CookiePath)
+ }
+ })
+
+ ctx.Map(f)
+ s := store{
+ RawStore: sess,
+ Manager: manager,
+ }
+
+ ctx.MapTo(s, (*Store)(nil))
+
+ ctx.Next()
+
+ if manager.opt.IgnoreReleaseForWebSocket && ctx.Req.Header.Get("Upgrade") == "websocket" {
+ return
+ }
+
+ if err = sess.Release(); err != nil {
+ panic("session(release): " + err.Error())
+ }
+ }
+}
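+
+// A minimal usage sketch (illustrative only, not part of this package's API):
+// register the middleware once, then any handler can request the mapped
+// session.Store service.
+//
+//	m := macaron.Classic()
+//	m.Use(session.Sessioner())
+//	m.Get("/", func(sess session.Store) string {
+//		_ = sess.Set("uid", 123)
+//		return "ok"
+//	})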
+
+// Provider is the interface that provides session manipulations.
+type Provider interface {
+ // Init initializes session provider.
+ Init(gclifetime int64, config string) error
+ // Read returns raw session store by session ID.
+ Read(sid string) (RawStore, error)
+ // Exist returns true if session with given ID exists.
+ Exist(sid string) bool
+ // Destory deletes a session by session ID.
+ Destory(sid string) error
+ // Regenerate regenerates a session store from old session ID to new one.
+ Regenerate(oldsid, sid string) (RawStore, error)
+ // Count counts and returns number of sessions.
+ Count() int
+ // GC calls GC to clean expired sessions.
+ GC()
+}
+
+var providers = make(map[string]Provider)
+
+// Register registers a provider.
+func Register(name string, provider Provider) {
+ if provider == nil {
+ panic("session: cannot register provider with nil value")
+ }
+ if _, dup := providers[name]; dup {
+ panic(fmt.Errorf("session: cannot register provider '%s' twice", name))
+ }
+ providers[name] = provider
+}
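+
+// Providers typically register themselves from an init function in their own
+// file (an illustrative sketch; MemProvider is a hypothetical provider type):
+//
+//	func init() {
+//		Register("memory", &MemProvider{})
+//	}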
+
+// _____
+// / \ _____ ____ _____ ____ ___________
+// / \ / \\__ \ / \\__ \ / ___\_/ __ \_ __ \
+// / Y \/ __ \| | \/ __ \_/ /_/ > ___/| | \/
+// \____|__ (____ /___| (____ /\___ / \___ >__|
+// \/ \/ \/ \//_____/ \/
+
+// Manager represents a struct that contains session provider and its configuration.
+type Manager struct {
+ provider Provider
+ opt Options
+}
+
+// NewManager creates and returns a new session manager for the given provider name and configuration.
+// It returns an error when the given provider isn't registered.
+func NewManager(name string, opt Options) (*Manager, error) {
+ p, ok := providers[name]
+ if !ok {
+ return nil, fmt.Errorf("session: unknown provider '%s'(forgotten import?)", name)
+ }
+ return &Manager{p, opt}, p.Init(opt.Maxlifetime, opt.ProviderConfig)
+}
+
+// sessionID generates a new session ID by hex-encoding random bytes.
+func (m *Manager) sessionID() string {
+ return hex.EncodeToString(generateRandomKey(m.opt.IDLength / 2))
+}
+
+// validSessionID tests whether a provided session ID is a valid session ID.
+func (m *Manager) validSessionID(sid string) (bool, error) {
+ if len(sid) != m.opt.IDLength {
+ return false, errors.New("invalid 'sid': " + sid)
+ }
+
+ for i := range sid {
+ switch {
+ case '0' <= sid[i] && sid[i] <= '9':
+ case 'a' <= sid[i] && sid[i] <= 'f':
+ default:
+ return false, errors.New("invalid 'sid': " + sid)
+ }
+ }
+ return true, nil
+}
+
+// Start starts a session by generating a new one,
+// or retrieves an existing one by reading the session ID from the HTTP request if it is valid.
+func (m *Manager) Start(ctx *macaron.Context) (RawStore, error) {
+ sid := ctx.GetCookie(m.opt.CookieName)
+ valid, _ := m.validSessionID(sid)
+ if len(sid) > 0 && valid && m.provider.Exist(sid) {
+ return m.provider.Read(sid)
+ }
+
+ sid = m.sessionID()
+ sess, err := m.provider.Read(sid)
+ if err != nil {
+ return nil, err
+ }
+
+ cookie := &http.Cookie{
+ Name: m.opt.CookieName,
+ Value: sid,
+ Path: m.opt.CookiePath,
+ HttpOnly: true,
+ Secure: m.opt.Secure,
+ Domain: m.opt.Domain,
+ }
+ if m.opt.CookieLifeTime >= 0 {
+ cookie.MaxAge = m.opt.CookieLifeTime
+ }
+ http.SetCookie(ctx.Resp, cookie)
+ ctx.Req.AddCookie(cookie)
+ return sess, nil
+}
+
+// Read returns raw session store by session ID.
+func (m *Manager) Read(sid string) (RawStore, error) {
+ // Ensure we're trying to read a valid session ID
+ if _, err := m.validSessionID(sid); err != nil {
+ return nil, err
+ }
+
+ return m.provider.Read(sid)
+}
+
+// Destory deletes a session by given ID.
+func (m *Manager) Destory(ctx *macaron.Context) error {
+ sid := ctx.GetCookie(m.opt.CookieName)
+ if len(sid) == 0 {
+ return nil
+ }
+
+ if _, err := m.validSessionID(sid); err != nil {
+ return err
+ }
+
+ if err := m.provider.Destory(sid); err != nil {
+ return err
+ }
+ cookie := &http.Cookie{
+ Name: m.opt.CookieName,
+ Path: m.opt.CookiePath,
+ HttpOnly: true,
+ Expires: time.Now(),
+ MaxAge: -1,
+ }
+ http.SetCookie(ctx.Resp, cookie)
+ return nil
+}
+
+// RegenerateId regenerates a session store from old session ID to new one.
+func (m *Manager) RegenerateId(ctx *macaron.Context) (sess RawStore, err error) {
+ sid := m.sessionID()
+ oldsid := ctx.GetCookie(m.opt.CookieName)
+ _, err = m.validSessionID(oldsid)
+ if err != nil {
+ return nil, err
+ }
+ sess, err = m.provider.Regenerate(oldsid, sid)
+ if err != nil {
+ return nil, err
+ }
+ cookie := &http.Cookie{
+ Name: m.opt.CookieName,
+ Value: sid,
+ Path: m.opt.CookiePath,
+ HttpOnly: true,
+ Secure: m.opt.Secure,
+ Domain: m.opt.Domain,
+ }
+ if m.opt.CookieLifeTime >= 0 {
+ cookie.MaxAge = m.opt.CookieLifeTime
+ }
+ http.SetCookie(ctx.Resp, cookie)
+ ctx.Req.AddCookie(cookie)
+ return sess, nil
+}
+
+// Count counts and returns number of sessions.
+func (m *Manager) Count() int {
+ return m.provider.Count()
+}
+
+// GC cleans expired sessions by calling the provider's GC.
+func (m *Manager) GC() {
+ m.provider.GC()
+}
+
+// startGC runs GC immediately and then reschedules itself after every GC interval.
+func (m *Manager) startGC() {
+ m.GC()
+ time.AfterFunc(time.Duration(m.opt.Gclifetime)*time.Second, func() { m.startGC() })
+}
+
+// SetSecure sets whether the session cookie should be sent over HTTPS only.
+func (m *Manager) SetSecure(secure bool) {
+ m.opt.Secure = secure
+}
diff --git a/vendor/github.com/go-macaron/session/utils.go b/vendor/github.com/go-macaron/session/utils.go
new file mode 100644
index 0000000..762b978
--- /dev/null
+++ b/vendor/github.com/go-macaron/session/utils.go
@@ -0,0 +1,63 @@
+// Copyright 2013 Beego Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package session
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/gob"
+ "io"
+
+ "github.com/unknwon/com"
+)
+
+func init() {
+ gob.Register([]interface{}{})
+ gob.Register(map[int]interface{}{})
+ gob.Register(map[string]interface{}{})
+ gob.Register(map[interface{}]interface{}{})
+ gob.Register(map[string]string{})
+ gob.Register(map[int]string{})
+ gob.Register(map[int]int{})
+ gob.Register(map[int]int64{})
+}
+
+func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) {
+ for _, v := range obj {
+ gob.Register(v)
+ }
+ buf := bytes.NewBuffer(nil)
+ err := gob.NewEncoder(buf).Encode(obj)
+ return buf.Bytes(), err
+}
+
+func DecodeGob(encoded []byte) (out map[interface{}]interface{}, err error) {
+ buf := bytes.NewBuffer(encoded)
+ err = gob.NewDecoder(buf).Decode(&out)
+ return out, err
+}
+
+// NOTE: a local copy of the alphabet in case the underlying package changes
+var alphanum = []byte("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
+
+// generateRandomKey creates a random key with the given strength.
+func generateRandomKey(strength int) []byte {
+ k := make([]byte, strength)
+ if n, err := io.ReadFull(rand.Reader, k); n != strength || err != nil {
+ return com.RandomCreateBytes(strength, alphanum...)
+ }
+ return k
+}
diff --git a/vendor/github.com/go-playground/form/v4/.gitignore b/vendor/github.com/go-playground/form/v4/.gitignore
new file mode 100644
index 0000000..4b0b7d2
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+old.txt
+new.txt
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/form/v4/.travis.yml b/vendor/github.com/go-playground/form/v4/.travis.yml
new file mode 100644
index 0000000..fbc61d1
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+go:
+ - 1.13.4
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/form/v4/LICENSE b/vendor/github.com/go-playground/form/v4/LICENSE
new file mode 100644
index 0000000..8d8aba1
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-playground/form/v4/README.md b/vendor/github.com/go-playground/form/v4/README.md
new file mode 100644
index 0000000..26b1518
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/README.md
@@ -0,0 +1,333 @@
+Package form
+============
+

+[](https://github.com/go-playground/form/actions/workflows/workflow.yml)
+[](https://coveralls.io/github/go-playground/form?branch=master)
+[](https://goreportcard.com/report/github.com/go-playground/form)
+[](https://godoc.org/github.com/go-playground/form)
+
+[](https://gitter.im/go-playground/form?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Package form Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values.
+
+It has the following features:
+
+- Supports map of almost all types.
+- Supports both Numbered and Normal arrays eg. `"Array[0]"` and just `"Array"` with multiple values passed.
+- Slice honours the specified index. eg. if "Slice[2]" is the only Slice value passed down, it will be put at index 2; if slice isn't big enough it will be expanded.
+- Array honours the specified index. eg. if "Array[2]" is the only Array value passed down, it will be put at index 2; if array isn't big enough a warning will be printed and value ignored.
+- Only creates objects as necessary eg. if no `array` or `map` values are passed down, the `array` and `map` are left as their default values in the struct.
+- Allows for Custom Type registration.
+- Handles time.Time using RFC3339 time format by default, but can easily be changed by registering a Custom Type, see below.
+- Handles Encoding & Decoding of almost all Go types eg. can Decode into struct, array, map, int... and Encode a struct, array, map, int...
+
+Common Questions
+
+- Does it support encoding.TextUnmarshaler? No, because TextUnmarshaler only accepts []byte while posted values can have multiple values, so it is not suitable.
+- Mixing `array/slice` with `array[idx]/slice[idx]`, in which order are they parsed? `array/slice` then `array[idx]/slice[idx]`
+
+Supported Types ( out of the box )
+----------
+
+* `string`
+* `bool`
+* `int`, `int8`, `int16`, `int32`, `int64`
+* `uint`, `uint8`, `uint16`, `uint32`, `uint64`
+* `float32`, `float64`
+* `struct` and `anonymous struct`
+* `interface{}`
+* `time.Time` - by default using RFC3339
+* a `pointer` to one of the above types
+* `slice`, `array`
+* `map`
+* `custom types` can override any of the above types
+* many other types may be supported inherently
+
+**NOTE**: `map`, `struct` and `slice` nesting are ad infinitum.
+
+Installation
+------------
+
+Use go get.
+
+ go get github.com/go-playground/form
+
+Then import the form package into your own code.
+
+ import "github.com/go-playground/form/v4"
+
+Usage
+-----
+
+- Use symbol `.` for separating fields/structs. (eg. `structfield.field`)
+- Use `[index or key]` for access to index of a slice/array or key for map. (eg. `arrayfield[0]`, `mapfield[keyvalue]`)
+
+```html
+<form method="POST">
+  <input type="text" name="Name" value="joeybloggs"/>
+  <input type="text" name="Age" value="3"/>
+  <input type="text" name="Gender" value="Male"/>
+  <input type="text" name="Address[0].Name" value="26 Here Blvd."/>
+  <input type="text" name="Address[0].Phone" value="9(999)999-9999"/>
+  <input type="text" name="Address[1].Name" value="26 There Blvd."/>
+  <input type="text" name="Address[1].Phone" value="1(111)111-1111"/>
+  <input type="text" name="active" value="true"/>
+  <input type="text" name="MapExample[key]" value="value"/>
+  <input type="text" name="NestedMap[key][key]" value="value"/>
+  <input type="text" name="NestedArray[0][0]" value="value"/>
+  <input type="submit"/>
+</form>
+```
+
+Examples
+-------
+
+Decoding
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+
+ "github.com/go-playground/form/v4"
+)
+
+// Address contains address information
+type Address struct {
+ Name string
+ Phone string
+}
+
+// User contains user information
+type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+}
+
+// use a single instance of Decoder, it caches struct info
+var decoder *form.Decoder
+
+func main() {
+ decoder = form.NewDecoder()
+
+ // this simulates the results of http.Request's ParseForm() function
+ values := parseForm()
+
+ var user User
+
+ // must pass a pointer
+ err := decoder.Decode(&user, values)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", user)
+}
+
+// this simulates the results of http.Request's ParseForm() function
+func parseForm() url.Values {
+ return url.Values{
+ "Name": []string{"joeybloggs"},
+ "Age": []string{"3"},
+ "Gender": []string{"Male"},
+ "Address[0].Name": []string{"26 Here Blvd."},
+ "Address[0].Phone": []string{"9(999)999-9999"},
+ "Address[1].Name": []string{"26 There Blvd."},
+ "Address[1].Phone": []string{"1(111)111-1111"},
+ "active": []string{"true"},
+ "MapExample[key]": []string{"value"},
+ "NestedMap[key][key]": []string{"value"},
+ "NestedArray[0][0]": []string{"value"},
+ }
+}
+```
+
+Encoding
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/go-playground/form/v4"
+)
+
+// Address contains address information
+type Address struct {
+ Name string
+ Phone string
+}
+
+// User contains user information
+type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+}
+
+// use a single instance of Encoder, it caches struct info
+var encoder *form.Encoder
+
+func main() {
+ encoder = form.NewEncoder()
+
+ user := User{
+ Name: "joeybloggs",
+ Age: 3,
+ Gender: "Male",
+ Address: []Address{
+ {Name: "26 Here Blvd.", Phone: "9(999)999-9999"},
+ {Name: "26 There Blvd.", Phone: "1(111)111-1111"},
+ },
+ Active: true,
+ MapExample: map[string]string{"key": "value"},
+ NestedMap: map[string]map[string]string{"key": {"key": "value"}},
+ NestedArray: [][]string{{"value"}},
+ }
+
+ // must pass a pointer
+ values, err := encoder.Encode(&user)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", values)
+}
+```
+
+Registering Custom Types
+--------------
+
+Decoder
+```go
+decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+ return time.Parse("2006-01-02", vals[0])
+}, time.Time{})
+```
+ADDITIONAL: if a struct type is registered, the function will only be called if a url.Value exists for
+the struct and not just the struct fields eg. url.Values{"User":"Name%3Djoeybloggs"} will call the
+custom type function with 'User' as the type, however url.Values{"User.Name":"joeybloggs"} will not.
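+
+A sketch of such a whole-struct registration (illustrative only; it reuses the `User` type from the decoding example above and assumes the posted value is itself query-string encoded):
+
+```go
+decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+	// vals[0] is the raw value posted under the "User" key, e.g. "Name=joeybloggs"
+	parsed, err := url.ParseQuery(vals[0])
+	if err != nil {
+		return nil, err
+	}
+	return User{Name: parsed.Get("Name")}, nil
+}, User{})
+```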
+
+
+Encoder
+```go
+encoder.RegisterCustomTypeFunc(func(x interface{}) ([]string, error) {
+ return []string{x.(time.Time).Format("2006-01-02")}, nil
+}, time.Time{})
+```
+
+Ignoring Fields
+--------------
+you can tell form to ignore fields using `-` in the tag
+```go
+type MyStruct struct {
+ Field string `form:"-"`
+}
+```
+
+Omitempty
+--------------
+you can tell form to omit empty fields using `,omitempty` or `FieldName,omitempty` in the tag
+```go
+type MyStruct struct {
+ Field string `form:",omitempty"`
+ Field2 string `form:"CustomFieldName,omitempty"`
+}
+```
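+
+As a quick illustration (a sketch reusing the Encoder from the Encoding example above), only the non-empty field ends up in the resulting url.Values:
+
+```go
+values, _ := encoder.Encode(&MyStruct{Field2: "set"})
+// Field is empty and omitted; Field2 is encoded under its custom name:
+// url.Values{"CustomFieldName":[]string{"set"}}
+fmt.Println(values)
+```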
+
+Notes
+------
+To maximize compatibility with other systems the Encoder attempts
+to avoid using array indexes in url.Values if at all possible.
+
+eg.
+```go
+// A struct field of
+Field []string{"1", "2", "3"}
+
+// will be output a url.Value as
+"Field": []string{"1", "2", "3"}
+
+and not
+"Field[0]": []string{"1"}
+"Field[1]": []string{"2"}
+"Field[2]": []string{"3"}
+
+// however there are times where it is unavoidable, like with pointers
+i := int(1)
+Field []*string{nil, nil, &i}
+
+// to avoid index 1 and 2 must use index
+"Field[2]": []string{"1"}
+```
+
+Benchmarks
+------
+###### Run on MacBook Pro (15-inch, 2017) using go version go1.10.1 darwin/amd64
+
+NOTE: the 1 allocation and B/op in the first 4 decodes come from the struct being allocated when it is passed in, so primitives are effectively zero allocation.
+
+```go
+go test -run=NONE -bench=. -benchmem=true
+goos: darwin
+goarch: amd64
+pkg: github.com/go-playground/form/benchmarks
+
+BenchmarkSimpleUserDecodeStruct-8 5000000 236 ns/op 64 B/op 1 allocs/op
+BenchmarkSimpleUserDecodeStructParallel-8 20000000 82.1 ns/op 64 B/op 1 allocs/op
+BenchmarkSimpleUserEncodeStruct-8 2000000 627 ns/op 485 B/op 10 allocs/op
+BenchmarkSimpleUserEncodeStructParallel-8 10000000 223 ns/op 485 B/op 10 allocs/op
+BenchmarkPrimitivesDecodeStructAllPrimitivesTypes-8 2000000 724 ns/op 96 B/op 1 allocs/op
+BenchmarkPrimitivesDecodeStructAllPrimitivesTypesParallel-8 10000000 246 ns/op 96 B/op 1 allocs/op
+BenchmarkPrimitivesEncodeStructAllPrimitivesTypes-8 500000 3187 ns/op 2977 B/op 36 allocs/op
+BenchmarkPrimitivesEncodeStructAllPrimitivesTypesParallel-8 1000000 1106 ns/op 2977 B/op 36 allocs/op
+BenchmarkComplexArrayDecodeStructAllTypes-8 100000 13748 ns/op 2248 B/op 121 allocs/op
+BenchmarkComplexArrayDecodeStructAllTypesParallel-8 500000 4313 ns/op 2249 B/op 121 allocs/op
+BenchmarkComplexArrayEncodeStructAllTypes-8 200000 10758 ns/op 7113 B/op 104 allocs/op
+BenchmarkComplexArrayEncodeStructAllTypesParallel-8 500000 3532 ns/op 7113 B/op 104 allocs/op
+BenchmarkComplexMapDecodeStructAllTypes-8 100000 17644 ns/op 5305 B/op 130 allocs/op
+BenchmarkComplexMapDecodeStructAllTypesParallel-8 300000 5470 ns/op 5308 B/op 130 allocs/op
+BenchmarkComplexMapEncodeStructAllTypes-8 200000 11155 ns/op 6971 B/op 129 allocs/op
+BenchmarkComplexMapEncodeStructAllTypesParallel-8 500000 3768 ns/op 6971 B/op 129 allocs/op
+BenchmarkDecodeNestedStruct-8 500000 2462 ns/op 384 B/op 14 allocs/op
+BenchmarkDecodeNestedStructParallel-8 2000000 814 ns/op 384 B/op 14 allocs/op
+BenchmarkEncodeNestedStruct-8 1000000 1483 ns/op 693 B/op 16 allocs/op
+BenchmarkEncodeNestedStructParallel-8 3000000 525 ns/op 693 B/op 16 allocs/op
+```
+
+Competitor benchmarks can be found [here](https://github.com/go-playground/form/blob/master/benchmarks/benchmarks.md)
+
+Complementary Software
+----------------------
+
+Here is a list of software that complements this library after decoding.
+
+* [Validator](https://github.com/go-playground/validator) - Go Struct and Field validation, including Cross Field, Cross Struct, Map, Slice and Array diving.
+* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects.
+
+Package Versioning
+----------
+I'm jumping on the vendoring bandwagon: you should vendor this package, as I will not
+be creating different versions with gopkg.in like a lot of my other libraries.
+
+Why? Because my time is spread pretty thin maintaining all of the libraries I have, plus life;
+it is freeing not to worry about it, and it will help me keep pouring out bigger and better
+things for you, the community.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/form/v4/cache.go b/vendor/github.com/go-playground/form/v4/cache.go
new file mode 100644
index 0000000..47e5fce
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/cache.go
@@ -0,0 +1,133 @@
+package form
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type cacheFields []cachedField
+
+func (s cacheFields) Len() int {
+ return len(s)
+}
+
+func (s cacheFields) Less(i, j int) bool {
+ return !s[i].isAnonymous
+}
+
+func (s cacheFields) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+type cachedField struct {
+ idx int
+ name string
+ isAnonymous bool
+ isOmitEmpty bool
+}
+
+type cachedStruct struct {
+ fields cacheFields
+}
+
+type structCacheMap struct {
+ m atomic.Value // map[reflect.Type]*cachedStruct
+ lock sync.Mutex
+ tagFn TagNameFunc
+}
+
+// TagNameFunc allows for adding a custom tag name parser
+type TagNameFunc func(field reflect.StructField) string
+
+func newStructCacheMap() *structCacheMap {
+
+ sc := new(structCacheMap)
+ sc.m.Store(make(map[reflect.Type]*cachedStruct))
+
+ return sc
+}
+
+func (s *structCacheMap) Get(key reflect.Type) (value *cachedStruct, ok bool) {
+ value, ok = s.m.Load().(map[reflect.Type]*cachedStruct)[key]
+ return
+}
+
+func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) {
+
+ m := s.m.Load().(map[reflect.Type]*cachedStruct)
+
+ nm := make(map[reflect.Type]*cachedStruct, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ s.m.Store(nm)
+}
+
+func (s *structCacheMap) parseStruct(mode Mode, current reflect.Value, key reflect.Type, tagName string) *cachedStruct {
+
+ s.lock.Lock()
+
+	// multiple goroutines could have been waiting to parse; once the first one
+	// finishes, this check ensures the struct isn't parsed again.
+ cs, ok := s.Get(key)
+ if ok {
+ s.lock.Unlock()
+ return cs
+ }
+
+ typ := current.Type()
+	cs = &cachedStruct{fields: make([]cachedField, 0, 4)} // init capacity 4, betting most structs being decoded into have at least 4 fields.
+
+ numFields := current.NumField()
+
+ var fld reflect.StructField
+ var name string
+ var idx int
+ var isOmitEmpty bool
+
+ for i := 0; i < numFields; i++ {
+ isOmitEmpty = false
+ fld = typ.Field(i)
+
+ if fld.PkgPath != blank && !fld.Anonymous {
+ continue
+ }
+
+ if s.tagFn != nil {
+ name = s.tagFn(fld)
+ } else {
+ name = fld.Tag.Get(tagName)
+ }
+
+ if name == ignore {
+ continue
+ }
+
+ if mode == ModeExplicit && len(name) == 0 {
+ continue
+ }
+
+ // check for omitempty
+ if idx = strings.LastIndexByte(name, ','); idx != -1 {
+ isOmitEmpty = name[idx+1:] == "omitempty"
+ name = name[:idx]
+ }
+
+ if len(name) == 0 {
+ name = fld.Name
+ }
+
+ cs.fields = append(cs.fields, cachedField{idx: i, name: name, isAnonymous: fld.Anonymous, isOmitEmpty: isOmitEmpty})
+ }
+
+ sort.Sort(cs.fields)
+ s.Set(typ, cs)
+
+ s.lock.Unlock()
+
+ return cs
+}
diff --git a/vendor/github.com/go-playground/form/v4/decoder.go b/vendor/github.com/go-playground/form/v4/decoder.go
new file mode 100644
index 0000000..bdd664a
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/decoder.go
@@ -0,0 +1,747 @@
+package form
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ errArraySize = "Array size of '%d' is larger than the maximum currently set on the decoder of '%d'. To increase this limit please see, SetMaxArraySize(size uint)"
+ errMissingStartBracket = "Invalid formatting for key '%s' missing '[' bracket"
+ errMissingEndBracket = "Invalid formatting for key '%s' missing ']' bracket"
+)
+
+type decoder struct {
+	// Release releases session resources and saves data to the provider.
+ errs DecodeErrors
+ dm dataMap
+ values url.Values
+ maxKeyLen int
+ namespace []byte
+}
+
+func (d *decoder) setError(namespace []byte, err error) {
+ if d.errs == nil {
+ d.errs = make(DecodeErrors)
+ }
+ d.errs[string(namespace)] = err
+}
+
+func (d *decoder) findAlias(ns string) *recursiveData {
+ for i := 0; i < len(d.dm); i++ {
+ if d.dm[i].alias == ns {
+ return d.dm[i]
+ }
+ }
+ return nil
+}
+
+func (d *decoder) parseMapData() {
+ // already parsed
+ if len(d.dm) > 0 {
+ return
+ }
+
+ d.maxKeyLen = 0
+ d.dm = d.dm[0:0]
+
+ var i int
+ var idx int
+ var l int
+ var insideBracket bool
+ var rd *recursiveData
+ var isNum bool
+
+ for k := range d.values {
+
+ if len(k) > d.maxKeyLen {
+ d.maxKeyLen = len(k)
+ }
+
+ for i = 0; i < len(k); i++ {
+
+ switch k[i] {
+ case '[':
+ idx = i
+ insideBracket = true
+ isNum = true
+ case ']':
+
+ if !insideBracket {
+ log.Panicf(errMissingStartBracket, k)
+ }
+
+ if rd = d.findAlias(k[:idx]); rd == nil {
+
+ l = len(d.dm) + 1
+
+ if l > cap(d.dm) {
+ dm := make(dataMap, l)
+ copy(dm, d.dm)
+ rd = new(recursiveData)
+ dm[len(d.dm)] = rd
+ d.dm = dm
+ } else {
+ l = len(d.dm)
+ d.dm = d.dm[:l+1]
+ rd = d.dm[l]
+ rd.sliceLen = 0
+ rd.keys = rd.keys[0:0]
+ }
+
+ rd.alias = k[:idx]
+ }
+
+ // is map + key
+ ke := key{
+ ivalue: -1,
+ value: k[idx+1 : i],
+ searchValue: k[idx : i+1],
+ }
+
+				// if the key is a number it is most likely an array index; keep track of it in case the target is an array/slice.
+ if isNum {
+
+ // no need to check for error, it will always pass
+ // as we have done the checking to ensure
+ // the value is a number ahead of time.
+ ke.ivalue, _ = strconv.Atoi(ke.value)
+
+ if ke.ivalue > rd.sliceLen {
+ rd.sliceLen = ke.ivalue
+
+ }
+ }
+
+ rd.keys = append(rd.keys, ke)
+
+ insideBracket = false
+ default:
+				// check whether the byte is not a digit ('0'-'9' are bytes 48-57)
+ if insideBracket && (k[i] > 57 || k[i] < 48) {
+ isNum = false
+ }
+ }
+ }
+
+ // if still inside bracket, that means no ending bracket was ever specified
+ if insideBracket {
+ log.Panicf(errMissingEndBracket, k)
+ }
+ }
+}
+
+func (d *decoder) traverseStruct(v reflect.Value, typ reflect.Type, namespace []byte) (set bool) {
+
+ l := len(namespace)
+ first := l == 0
+
+ // anonymous structs will still work for caching as the whole definition is stored
+ // including tags
+ s, ok := d.d.structCache.Get(typ)
+ if !ok {
+ s = d.d.structCache.parseStruct(d.d.mode, v, typ, d.d.tagName)
+ }
+
+ for _, f := range s.fields {
+ namespace = namespace[:l]
+
+ if f.isAnonymous {
+ if d.setFieldByType(v.Field(f.idx), namespace, 0) {
+ set = true
+ }
+ }
+
+ if first {
+ namespace = append(namespace, f.name...)
+ } else {
+ namespace = append(namespace, namespaceSeparator)
+ namespace = append(namespace, f.name...)
+ }
+
+ if d.setFieldByType(v.Field(f.idx), namespace, 0) {
+ set = true
+ }
+ }
+
+ return
+}
+
+func (d *decoder) setFieldByType(current reflect.Value, namespace []byte, idx int) (set bool) {
+
+ var err error
+ v, kind := ExtractType(current)
+
+ arr, ok := d.values[string(namespace)]
+
+ if d.d.customTypeFuncs != nil {
+
+ if ok {
+ if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
+ val, err := cf(arr[idx:])
+ if err != nil {
+ d.setError(namespace, err)
+ return
+ }
+
+ v.Set(reflect.ValueOf(val))
+ set = true
+ return
+ }
+ }
+ }
+ switch kind {
+ case reflect.Interface:
+ if !ok || idx == len(arr) {
+ return
+ }
+ v.Set(reflect.ValueOf(arr[idx]))
+ set = true
+
+ case reflect.Ptr:
+ newVal := reflect.New(v.Type().Elem())
+ if set = d.setFieldByType(newVal.Elem(), namespace, idx); set {
+ v.Set(newVal)
+ }
+
+ case reflect.String:
+ if !ok || idx == len(arr) {
+ return
+ }
+ v.SetString(arr[idx])
+ set = true
+
+ case reflect.Uint, reflect.Uint64:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint8:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 8); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint16:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 16); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint32:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Int, reflect.Int64:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int8:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 8); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int16:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 16); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int32:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Float32:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var f float64
+ if f, err = strconv.ParseFloat(arr[idx], 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetFloat(f)
+ set = true
+
+ case reflect.Float64:
+ if !ok || idx == len(arr) || len(arr[idx]) == 0 {
+ return
+ }
+ var f float64
+ if f, err = strconv.ParseFloat(arr[idx], 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetFloat(f)
+ set = true
+
+ case reflect.Bool:
+ if !ok || idx == len(arr) {
+ return
+ }
+ var b bool
+ if b, err = parseBool(arr[idx]); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetBool(b)
+ set = true
+
+ case reflect.Slice:
+ d.parseMapData()
+		// slice elements could be mixed, eg. numbered and un-numbered keys: Value[0]=[]string{"10"} and Value=[]string{"10","20"}
+
+ if ok && len(arr) > 0 {
+ var varr reflect.Value
+
+ var ol int
+ l := len(arr)
+
+ if v.IsNil() {
+ varr = reflect.MakeSlice(v.Type(), len(arr), len(arr))
+ } else {
+
+ ol = v.Len()
+ l += ol
+
+ if v.Cap() <= l {
+ varr = reflect.MakeSlice(v.Type(), l, l)
+ } else {
+ // preserve predefined capacity, possibly for reuse after decoding
+ varr = reflect.MakeSlice(v.Type(), l, v.Cap())
+ }
+ reflect.Copy(varr, v)
+ }
+
+ for i := ol; i < l; i++ {
+ newVal := reflect.New(v.Type().Elem()).Elem()
+
+ if d.setFieldByType(newVal, namespace, i-ol) {
+ set = true
+ varr.Index(i).Set(newVal)
+ }
+ }
+
+ v.Set(varr)
+ }
+
+		// maybe it's a numbered array, i.e. Phone[0].Number
+ if rd := d.findAlias(string(namespace)); rd != nil {
+
+ var varr reflect.Value
+ var kv key
+
+ sl := rd.sliceLen + 1
+
+ // checking below for maxArraySize, but if array exists and already
+ // has sufficient capacity allocated then we do not check as the code
+ // obviously allows a capacity greater than the maxArraySize.
+
+ if v.IsNil() {
+
+ if sl > d.d.maxArraySize {
+ d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
+ return
+ }
+
+ varr = reflect.MakeSlice(v.Type(), sl, sl)
+
+ } else if v.Len() < sl {
+
+ if v.Cap() <= sl {
+
+ if sl > d.d.maxArraySize {
+ d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
+ return
+ }
+
+ varr = reflect.MakeSlice(v.Type(), sl, sl)
+ } else {
+ varr = reflect.MakeSlice(v.Type(), sl, v.Cap())
+ }
+
+ reflect.Copy(varr, v)
+
+ } else {
+ varr = v
+ }
+
+ for i := 0; i < len(rd.keys); i++ {
+
+ kv = rd.keys[i]
+ newVal := reflect.New(varr.Type().Elem()).Elem()
+
+ if kv.ivalue == -1 {
+ d.setError(namespace, fmt.Errorf("invalid slice index '%s'", kv.value))
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ varr.Index(kv.ivalue).Set(newVal)
+ }
+ }
+
+ if !set {
+ return
+ }
+
+ v.Set(varr)
+ }
+
+ case reflect.Array:
+ d.parseMapData()
+
+	// array elements could be mixed, eg. numbered and un-numbered keys: Value[0]=[]string{"10"} and Value=[]string{"10","20"}
+
+ if ok && len(arr) > 0 {
+ var varr reflect.Value
+ l := len(arr)
+ overCapacity := v.Len() < l
+ if overCapacity {
+ // more values than array capacity, ignore values over capacity as it's possible some would just want
+ // to grab the first x number of elements; in the future strict mode logic should return an error
+ fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
+ }
+ varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
+ reflect.Copy(varr, v)
+
+ if v.Len() < len(arr) {
+ l = v.Len()
+ }
+ for i := 0; i < l; i++ {
+ newVal := reflect.New(v.Type().Elem()).Elem()
+
+ if d.setFieldByType(newVal, namespace, i) {
+ set = true
+ varr.Index(i).Set(newVal)
+ }
+ }
+ v.Set(varr)
+ }
+
+	// maybe it's a numbered array, i.e. Phone[0].Number
+ if rd := d.findAlias(string(namespace)); rd != nil {
+ var varr reflect.Value
+ var kv key
+
+ overCapacity := rd.sliceLen >= v.Len()
+ if overCapacity {
+ // more values than array capacity, ignore values over capacity as it's possible some would just want
+ // to grab the first x number of elements; in the future strict mode logic should return an error
+ fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
+ }
+ varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
+ reflect.Copy(varr, v)
+
+ for i := 0; i < len(rd.keys); i++ {
+ kv = rd.keys[i]
+ if kv.ivalue >= v.Len() {
+ continue
+ }
+ newVal := reflect.New(varr.Type().Elem()).Elem()
+
+ if kv.ivalue == -1 {
+ d.setError(namespace, fmt.Errorf("invalid array index '%s'", kv.value))
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ varr.Index(kv.ivalue).Set(newVal)
+ }
+ }
+
+ if !set {
+ return
+ }
+ v.Set(varr)
+ }
+
+ case reflect.Map:
+ var rd *recursiveData
+
+ d.parseMapData()
+
+ // no natural map support so skip directly to dm lookup
+ if rd = d.findAlias(string(namespace)); rd == nil {
+ return
+ }
+
+ var existing bool
+ var kv key
+ var mp reflect.Value
+ var mk reflect.Value
+
+ typ := v.Type()
+
+ if v.IsNil() {
+ mp = reflect.MakeMap(typ)
+ } else {
+ existing = true
+ mp = v
+ }
+
+ for i := 0; i < len(rd.keys); i++ {
+ newVal := reflect.New(typ.Elem()).Elem()
+ mk = reflect.New(typ.Key()).Elem()
+ kv = rd.keys[i]
+
+ if err := d.getMapKey(kv.value, mk, namespace); err != nil {
+ d.setError(namespace, err)
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ mp.SetMapIndex(mk, newVal)
+ }
+ }
+
+ if !set || existing {
+ return
+ }
+
+ v.Set(mp)
+
+ case reflect.Struct:
+ typ := v.Type()
+
+ // if we get here then no custom time function declared so use RFC3339 by default
+ if typ == timeType {
+
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+
+ t, err := time.Parse(time.RFC3339, arr[idx])
+ if err != nil {
+ d.setError(namespace, err)
+ }
+
+ v.Set(reflect.ValueOf(t))
+ set = true
+ return
+ }
+
+ d.parseMapData()
+
+		// we must be recursing infinitely... but that's ok, we caught it on the very first overrun.
+ if len(namespace) > d.maxKeyLen {
+ return
+ }
+
+ set = d.traverseStruct(v, typ, namespace)
+ }
+ return
+}
+
+func (d *decoder) getMapKey(key string, current reflect.Value, namespace []byte) (err error) {
+
+ v, kind := ExtractType(current)
+
+ if d.d.customTypeFuncs != nil {
+ if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
+
+ val, er := cf([]string{key})
+ if er != nil {
+ err = er
+ return
+ }
+
+ v.Set(reflect.ValueOf(val))
+ return
+ }
+ }
+
+ switch kind {
+ case reflect.Interface:
+		// If the interface had been set on the struct before decoding, say to
+		// a struct value, we would not get here; kind would be struct instead.
+ v.Set(reflect.ValueOf(key))
+ return
+ case reflect.Ptr:
+ newVal := reflect.New(v.Type().Elem())
+ if err = d.getMapKey(key, newVal.Elem(), namespace); err == nil {
+ v.Set(newVal)
+ }
+
+ case reflect.String:
+ v.SetString(key)
+
+ case reflect.Uint, reflect.Uint64:
+
+ u64, e := strconv.ParseUint(key, 10, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint8:
+
+ u64, e := strconv.ParseUint(key, 10, 8)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint16:
+
+ u64, e := strconv.ParseUint(key, 10, 16)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint32:
+
+ u64, e := strconv.ParseUint(key, 10, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Int, reflect.Int64:
+
+ i64, e := strconv.ParseInt(key, 10, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int8:
+
+ i64, e := strconv.ParseInt(key, 10, 8)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int16:
+
+ i64, e := strconv.ParseInt(key, 10, 16)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int32:
+
+ i64, e := strconv.ParseInt(key, 10, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Float32:
+
+ f, e := strconv.ParseFloat(key, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetFloat(f)
+
+ case reflect.Float64:
+
+ f, e := strconv.ParseFloat(key, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetFloat(f)
+
+ case reflect.Bool:
+
+ b, e := parseBool(key)
+ if e != nil {
+ err = fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetBool(b)
+
+ default:
+ err = fmt.Errorf("Unsupported Map Key '%s', Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-playground/form/v4/doc.go b/vendor/github.com/go-playground/form/v4/doc.go
new file mode 100644
index 0000000..f553dac
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/doc.go
@@ -0,0 +1,275 @@
+/*
+Package form Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values.
+
+
+It has the following features:
+
+	- Primitive types cause zero allocations.
+ - Supports map of almost all types.
+ - Supports both Numbered and Normal arrays eg. "Array[0]" and just "Array"
+ with multiple values passed.
+ - Slice honours the specified index. eg. if "Slice[2]" is the only Slice
+ value passed down, it will be put at index 2; if slice isn't big enough
+ it will be expanded.
+ - Array honours the specified index. eg. if "Array[2]" is the only Array
+ value passed down, it will be put at index 2; if array isn't big enough
+ a warning will be printed and value ignored.
+ - Only creates objects as necessary eg. if no `array` or `map` values are
+ passed down, the `array` and `map` are left as their default values in
+ the struct.
+ - Allows for Custom Type registration.
+ - Handles time.Time using RFC3339 time format by default,
+ but can easily be changed by registering a Custom Type, see below.
+ - Handles Encoding & Decoding of almost all Go types eg. can Decode into
+ struct, array, map, int... and Encode a struct, array, map, int...
+
+Common Questions
+
+Questions
+
+ Does it support encoding.TextUnmarshaler?
+ No because TextUnmarshaler only accepts []byte but posted values can have
+ multiple values, so is not suitable.
+
+ Mixing array/slice with array[idx]/slice[idx], in which order are they parsed?
+ array/slice then array[idx]/slice[idx]
+
+Supported Types
+
+out of the box supported types
+
+ - string
+ - bool
+ - int, int8, int16, int32, int64
+ - uint, uint8, uint16, uint32, uint64
+ - float32, float64
+ - struct and anonymous struct
+ - interface{}
+ - time.Time` - by default using RFC3339
+ - a `pointer` to one of the above types
+ - slice, array
+ - map
+ - `custom types` can override any of the above types
+	- many other types may be supported inherently (eg. bson.ObjectId is
+	  type ObjectId string, which will get populated by the string type)
+
+ **NOTE**: map, struct and slice nesting are ad infinitum.
+
+Usage
+
+symbols
+
+ - Use symbol `.` for separating fields/structs. (eg. `structfield.field`)
+ - Use `[index or key]` for access to index of a slice/array or key for map.
+ (eg. `arrayfield[0]`, `mapfield[keyvalue]`)
+
+html
+
+
+
+Example
+
+example decoding the above HTML
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+ "net/url"
+
+ "github.com/go-playground/form/v4"
+ )
+
+ // Address contains address information
+ type Address struct {
+ Name string
+ Phone string
+ }
+
+ // User contains user information
+ type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+ }
+
+ // use a single instance of Decoder, it caches struct info
+ var decoder *form.Decoder
+
+ func main() {
+ decoder = form.NewDecoder()
+
+ // this simulates the results of http.Request's ParseForm() function
+ values := parseForm()
+
+ var user User
+
+ // must pass a pointer
+ err := decoder.Decode(&user, values)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", user)
+ }
+
+ // this simulates the results of http.Request's ParseForm() function
+ func parseForm() url.Values {
+ return url.Values{
+ "Name": []string{"joeybloggs"},
+ "Age": []string{"3"},
+ "Gender": []string{"Male"},
+ "Address[0].Name": []string{"26 Here Blvd."},
+ "Address[0].Phone": []string{"9(999)999-9999"},
+ "Address[1].Name": []string{"26 There Blvd."},
+ "Address[1].Phone": []string{"1(111)111-1111"},
+ "active": []string{"true"},
+ "MapExample[key]": []string{"value"},
+ "NestedMap[key][key]": []string{"value"},
+ "NestedArray[0][0]": []string{"value"},
+ }
+ }
+
+example encoding
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+
+ "github.com/go-playground/form/v4"
+ )
+
+ // Address contains address information
+ type Address struct {
+ Name string
+ Phone string
+ }
+
+ // User contains user information
+ type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+ }
+
+ // use a single instance of Encoder, it caches struct info
+ var encoder *form.Encoder
+
+ func main() {
+ encoder = form.NewEncoder()
+
+ user := User{
+ Name: "joeybloggs",
+ Age: 3,
+ Gender: "Male",
+ Address: []Address{
+ {Name: "26 Here Blvd.", Phone: "9(999)999-9999"},
+ {Name: "26 There Blvd.", Phone: "1(111)111-1111"},
+ },
+ Active: true,
+ MapExample: map[string]string{"key": "value"},
+ NestedMap: map[string]map[string]string{"key": {"key": "value"}},
+ NestedArray: [][]string{{"value"}},
+ }
+
+ // must pass a pointer
+ values, err := encoder.Encode(&user)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", values)
+ }
+
+
+Registering Custom Types
+
+Decoder
+
+ decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+ return time.Parse("2006-01-02", vals[0])
+ }, time.Time{})
+
+ ADDITIONAL: if a struct type is registered, the function will only be called
+ if a url.Value exists for the struct and not just the struct fields
+ eg. url.Values{"User":"Name%3Djoeybloggs"} will call the custom type function
+ with 'User' as the type, however url.Values{"User.Name":"joeybloggs"} will not.
+
+Encoder
+
+ encoder.RegisterCustomTypeFunc(func(x interface{}) ([]string, error) {
+ return []string{x.(time.Time).Format("2006-01-02")}, nil
+ }, time.Time{})
+
+
+Ignoring Fields
+
+you can tell form to ignore fields using `-` in the tag
+
+ type MyStruct struct {
+ Field string `form:"-"`
+ }
+
+Omitempty
+
+you can tell form to omit empty fields using `,omitempty` or `FieldName,omitempty` in the tag
+
+ type MyStruct struct {
+ Field string `form:",omitempty"`
+ Field2 string `form:"CustomFieldName,omitempty"`
+ }
+
+
+Notes
+
+To maximize compatibility with other systems the Encoder attempts
+to avoid using array indexes in url.Values if at all possible.
+
+ eg.
+
+ // A struct field of
+ Field []string{"1", "2", "3"}
+
+ // will be output a url.Value as
+ "Field": []string{"1", "2", "3"}
+
+ and not
+ "Field[0]": []string{"1"}
+ "Field[1]": []string{"2"}
+ "Field[2]": []string{"3"}
+
+ // however there are times where it is unavoidable, like with pointers
+ i := int(1)
+ Field []*string{nil, nil, &i}
+
+ // to avoid index 1 and 2 must use index
+ "Field[2]": []string{"1"}
+
+*/
+package form
diff --git a/vendor/github.com/go-playground/form/v4/encoder.go b/vendor/github.com/go-playground/form/v4/encoder.go
new file mode 100644
index 0000000..ff08ed0
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/encoder.go
@@ -0,0 +1,260 @@
+package form
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+type encoder struct {
+ e *Encoder
+ errs EncodeErrors
+ values url.Values
+ namespace []byte
+}
+
+func (e *encoder) setError(namespace []byte, err error) {
+ if e.errs == nil {
+ e.errs = make(EncodeErrors)
+ }
+
+ e.errs[string(namespace)] = err
+}
+
+func (e *encoder) setVal(namespace []byte, idx int, vals ...string) {
+
+ arr, ok := e.values[string(namespace)]
+ if ok {
+ arr = append(arr, vals...)
+ } else {
+ arr = vals
+ }
+
+ e.values[string(namespace)] = arr
+}
+
+func (e *encoder) traverseStruct(v reflect.Value, namespace []byte, idx int) {
+
+ typ := v.Type()
+ l := len(namespace)
+ first := l == 0
+
+ // anonymous structs will still work for caching as the whole definition is stored
+ // including tags
+ s, ok := e.e.structCache.Get(typ)
+ if !ok {
+ s = e.e.structCache.parseStruct(e.e.mode, v, typ, e.e.tagName)
+ }
+
+ for _, f := range s.fields {
+ namespace = namespace[:l]
+
+ if f.isAnonymous && e.e.embedAnonymous {
+ e.setFieldByType(v.Field(f.idx), namespace, idx, f.isOmitEmpty)
+ continue
+ }
+
+ if first {
+ namespace = append(namespace, f.name...)
+ } else {
+ namespace = append(namespace, namespaceSeparator)
+ namespace = append(namespace, f.name...)
+ }
+
+ e.setFieldByType(v.Field(f.idx), namespace, idx, f.isOmitEmpty)
+ }
+}
+
+func (e *encoder) setFieldByType(current reflect.Value, namespace []byte, idx int, isOmitEmpty bool) {
+
+ if idx > -1 && current.Kind() == reflect.Ptr {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ idx = -2
+ }
+
+ if isOmitEmpty && !hasValue(current) {
+ return
+ }
+ v, kind := ExtractType(current)
+
+ if e.e.customTypeFuncs != nil {
+
+ if cf, ok := e.e.customTypeFuncs[v.Type()]; ok {
+
+ arr, err := cf(v.Interface())
+ if err != nil {
+ e.setError(namespace, err)
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.setVal(namespace, idx, arr...)
+ return
+ }
+ }
+
+ switch kind {
+ case reflect.Ptr, reflect.Interface, reflect.Invalid:
+ return
+
+ case reflect.String:
+
+ e.setVal(namespace, idx, v.String())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+
+ e.setVal(namespace, idx, strconv.FormatUint(v.Uint(), 10))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ e.setVal(namespace, idx, strconv.FormatInt(v.Int(), 10))
+
+ case reflect.Float32:
+
+ e.setVal(namespace, idx, strconv.FormatFloat(v.Float(), 'f', -1, 32))
+
+ case reflect.Float64:
+
+ e.setVal(namespace, idx, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+
+ case reflect.Bool:
+
+ e.setVal(namespace, idx, strconv.FormatBool(v.Bool()))
+
+ case reflect.Slice, reflect.Array:
+
+ if idx == -1 {
+
+ for i := 0; i < v.Len(); i++ {
+ e.setFieldByType(v.Index(i), namespace, i, false)
+ }
+
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ namespace = append(namespace, '[')
+ l := len(namespace)
+
+ for i := 0; i < v.Len(); i++ {
+ namespace = namespace[:l]
+ namespace = strconv.AppendInt(namespace, int64(i), 10)
+ namespace = append(namespace, ']')
+ e.setFieldByType(v.Index(i), namespace, -2, false)
+ }
+
+ case reflect.Map:
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ var valid bool
+ var s string
+ l := len(namespace)
+
+ for _, key := range v.MapKeys() {
+
+ namespace = namespace[:l]
+
+ if s, valid = e.getMapKey(key, namespace); !valid {
+ continue
+ }
+
+ namespace = append(namespace, '[')
+ namespace = append(namespace, s...)
+ namespace = append(namespace, ']')
+
+ e.setFieldByType(v.MapIndex(key), namespace, -2, false)
+ }
+
+ case reflect.Struct:
+
+ // if we get here then no custom time function declared so use RFC3339 by default
+ if v.Type() == timeType {
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.setVal(namespace, idx, v.Interface().(time.Time).Format(time.RFC3339))
+ return
+ }
+
+ if idx == -1 {
+ e.traverseStruct(v, namespace, idx)
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.traverseStruct(v, namespace, -2)
+ }
+}
+
+func (e *encoder) getMapKey(key reflect.Value, namespace []byte) (string, bool) {
+
+ v, kind := ExtractType(key)
+
+ if e.e.customTypeFuncs != nil {
+
+ if cf, ok := e.e.customTypeFuncs[v.Type()]; ok {
+ arr, err := cf(v.Interface())
+ if err != nil {
+ e.setError(namespace, err)
+ return "", false
+ }
+
+ return arr[0], true
+ }
+ }
+
+ switch kind {
+ case reflect.Interface, reflect.Ptr:
+ return "", false
+
+ case reflect.String:
+ return v.String(), true
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(v.Uint(), 10), true
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(v.Int(), 10), true
+
+ case reflect.Float32:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 32), true
+
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64), true
+
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool()), true
+
+ default:
+ e.setError(namespace, fmt.Errorf("Unsupported Map Key '%v' Namespace '%s'", v.String(), namespace))
+ return "", false
+ }
+}
diff --git a/vendor/github.com/go-playground/form/v4/form.go b/vendor/github.com/go-playground/form/v4/form.go
new file mode 100644
index 0000000..0ce5917
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/form.go
@@ -0,0 +1,50 @@
+package form
+
+import (
+ "reflect"
+ "time"
+)
+
+const (
+ blank = ""
+ namespaceSeparator = '.'
+ ignore = "-"
+ fieldNS = "Field Namespace:"
+ errorText = " ERROR:"
+)
+
+var (
+ timeType = reflect.TypeOf(time.Time{})
+)
+
+// Mode specifies which mode the form decoder is to run
+type Mode uint8
+
+const (
+
+ // ModeImplicit tries to parse values for all
+ // fields that do not have an ignore '-' tag
+ ModeImplicit Mode = iota
+
+ // ModeExplicit only parses values for field with a field tag
+ // and that tag is not the ignore '-' tag
+ ModeExplicit
+)
+
+// AnonymousMode specifies how data should be rolled up
+// or separated from anonymous structs
+type AnonymousMode uint8
+
+const (
+ // AnonymousEmbed embeds anonymous data when encoding
+ // eg. type A struct { Field string }
+ // type B struct { A, Field string }
+ // encode results: url.Values{"Field":[]string{"B FieldVal", "A FieldVal"}}
+ AnonymousEmbed AnonymousMode = iota
+
+ // AnonymousSeparate does not embed anonymous data when encoding
+ // eg. type A struct { Field string }
+ // type B struct { A, Field string }
+ // encode results: url.Values{"Field":[]string{"B FieldVal"}, "A.Field":[]string{"A FieldVal"}}
+ AnonymousSeparate
+)
diff --git a/vendor/github.com/go-playground/form/v4/form_decoder.go b/vendor/github.com/go-playground/form/v4/form_decoder.go
new file mode 100644
index 0000000..fe8b36f
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/form_decoder.go
@@ -0,0 +1,174 @@
+package form
+
+import (
+ "bytes"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DecodeCustomTypeFunc allows for registering/overriding types to be parsed.
+type DecodeCustomTypeFunc func([]string) (interface{}, error)
+
+// DecodeErrors is a map of errors encountered during form decoding
+type DecodeErrors map[string]error
+
+func (d DecodeErrors) Error() string {
+ buff := bytes.NewBufferString(blank)
+
+ for k, err := range d {
+ buff.WriteString(fieldNS)
+ buff.WriteString(k)
+ buff.WriteString(errorText)
+ buff.WriteString(err.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// An InvalidDecoderError describes an invalid argument passed to Decode.
+// (The argument passed to Decode must be a non-nil pointer.)
+type InvalidDecoderError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidDecoderError) Error() string {
+
+ if e.Type == nil {
+ return "form: Decode(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "form: Decode(non-pointer " + e.Type.String() + ")"
+ }
+
+ return "form: Decode(nil " + e.Type.String() + ")"
+}
+
+type key struct {
+ ivalue int
+ value string
+ searchValue string
+}
+
+type recursiveData struct {
+ alias string
+ sliceLen int
+ keys []key
+}
+
+type dataMap []*recursiveData
+
+// Decoder is the main decode instance
+type Decoder struct {
+ tagName string
+ mode Mode
+ structCache *structCacheMap
+ customTypeFuncs map[reflect.Type]DecodeCustomTypeFunc
+ maxArraySize int
+ dataPool *sync.Pool
+}
+
+// NewDecoder creates a new decoder instance with sane defaults
+func NewDecoder() *Decoder {
+
+ d := &Decoder{
+ tagName: "form",
+ mode: ModeImplicit,
+ structCache: newStructCacheMap(),
+ maxArraySize: 10000,
+ }
+
+ d.dataPool = &sync.Pool{New: func() interface{} {
+ return &decoder{
+ d: d,
+ namespace: make([]byte, 0, 64),
+ }
+ }}
+
+ return d
+}
+
+// SetTagName sets the given tag name to be used by the decoder.
+// Default is "form"
+func (d *Decoder) SetTagName(tagName string) {
+ d.tagName = tagName
+}
+
+// SetMode sets the mode the decoder should run
+// Default is ModeImplicit
+func (d *Decoder) SetMode(mode Mode) {
+ d.mode = mode
+}
+
+// SetMaxArraySize sets the maximum array size that can be created.
+// This limit applies to the array indexing this library supports, to
+// avoid potential DOS attacks from requests that use an unusually
+// high index.
+// DEFAULT: 10000
+func (d *Decoder) SetMaxArraySize(size uint) {
+ d.maxArraySize = int(size)
+}
+
+// RegisterTagNameFunc registers a custom tag name parser function
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: once a custom function has been registered the default, or custom set, tag name is ignored
+// and the function is relied upon 100% for the name data. The return value WILL BE CACHED and so the return
+// value must be consistent.
+func (d *Decoder) RegisterTagNameFunc(fn TagNameFunc) {
+ d.structCache.tagFn = fn
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types.
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: if a struct type is registered, the function will only be called if a url.Value exists for
+// the struct and not just the struct fields, eg. url.Values{"User":"Name%3Djoeybloggs"} will call the
+// custom type function with `User` as the type, however url.Values{"User.Name":"joeybloggs"} will not.
+func (d *Decoder) RegisterCustomTypeFunc(fn DecodeCustomTypeFunc, types ...interface{}) {
+
+ if d.customTypeFuncs == nil {
+ d.customTypeFuncs = map[reflect.Type]DecodeCustomTypeFunc{}
+ }
+
+ for _, t := range types {
+ d.customTypeFuncs[reflect.TypeOf(t)] = fn
+ }
+}
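As a hedged illustration of the note above, a custom type function registered for `time.Time` is only invoked when a value exists for the field itself; the date layout, struct and tag names here are assumptions for the example, not part of the library:

```go
package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/go-playground/form/v4"
)

type Event struct {
	When time.Time `form:"when"`
}

func main() {
	dec := form.NewDecoder()

	// Replace the default time handling with a date-only layout.
	dec.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
		return time.Parse("2006-01-02", vals[0])
	}, time.Time{})

	var e Event
	if err := dec.Decode(&e, url.Values{"when": {"2021-03-01"}}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(e.When.Format("2006-01-02")) // 2021-03-01
}
```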
+
+// Decode parses the given values and sets the corresponding struct and/or type values
+//
+// Decode returns an InvalidDecoderError if the interface passed is invalid.
+func (d *Decoder) Decode(v interface{}, values url.Values) (err error) {
+
+ val := reflect.ValueOf(v)
+
+ if val.Kind() != reflect.Ptr || val.IsNil() {
+ return &InvalidDecoderError{reflect.TypeOf(v)}
+ }
+
+ dec := d.dataPool.Get().(*decoder)
+ dec.values = values
+ dec.dm = dec.dm[0:0]
+
+ val = val.Elem()
+ typ := val.Type()
+
+ if val.Kind() == reflect.Struct && typ != timeType {
+ dec.traverseStruct(val, typ, dec.namespace[0:0])
+ } else {
+ dec.setFieldByType(val, dec.namespace[0:0], 0)
+ }
+
+ if len(dec.errs) > 0 {
+ err = dec.errs
+ dec.errs = nil
+ }
+
+ d.dataPool.Put(dec)
+
+ return
+}
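Decode must be handed a non-nil pointer, and per-field failures come back as a DecodeErrors map keyed by field namespace. A brief sketch with an intentionally bad value to show the error shapes; the field names are illustrative:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/go-playground/form/v4"
)

func main() {
	var u struct {
		Age int `form:"age"`
	}

	dec := form.NewDecoder()
	err := dec.Decode(&u, url.Values{"age": {"not-a-number"}})

	// A failed field conversion is reported per namespace.
	if derrs, ok := err.(form.DecodeErrors); ok {
		for ns, e := range derrs {
			fmt.Println(ns, "->", e)
		}
	}

	// Passing a non-pointer (or nil pointer) yields an *InvalidDecoderError.
	fmt.Println(dec.Decode(u, nil))
}
```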
diff --git a/vendor/github.com/go-playground/form/v4/form_encoder.go b/vendor/github.com/go-playground/form/v4/form_encoder.go
new file mode 100644
index 0000000..ad3cc10
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/form_encoder.go
@@ -0,0 +1,144 @@
+package form
+
+import (
+ "bytes"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// EncodeCustomTypeFunc allows for registering/overriding types to be parsed.
+type EncodeCustomTypeFunc func(x interface{}) ([]string, error)
+
+// EncodeErrors is a map of errors encountered during form encoding
+type EncodeErrors map[string]error
+
+func (e EncodeErrors) Error() string {
+ buff := bytes.NewBufferString(blank)
+
+ for k, err := range e {
+ buff.WriteString(fieldNS)
+ buff.WriteString(k)
+ buff.WriteString(errorText)
+ buff.WriteString(err.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// An InvalidEncodeError describes an invalid argument passed to Encode.
+type InvalidEncodeError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidEncodeError) Error() string {
+
+ if e.Type == nil {
+ return "form: Encode(nil)"
+ }
+
+ return "form: Encode(nil " + e.Type.String() + ")"
+}
+
+// Encoder is the main encode instance
+type Encoder struct {
+ tagName string
+ structCache *structCacheMap
+ customTypeFuncs map[reflect.Type]EncodeCustomTypeFunc
+ dataPool *sync.Pool
+ mode Mode
+ embedAnonymous bool
+}
+
+// NewEncoder creates a new encoder instance with sane defaults
+func NewEncoder() *Encoder {
+
+ e := &Encoder{
+ tagName: "form",
+ mode: ModeImplicit,
+ structCache: newStructCacheMap(),
+ embedAnonymous: true,
+ }
+
+ e.dataPool = &sync.Pool{New: func() interface{} {
+ return &encoder{
+ e: e,
+ namespace: make([]byte, 0, 64),
+ }
+ }}
+
+ return e
+}
+
+// SetTagName sets the given tag name to be used by the encoder.
+// Default is "form"
+func (e *Encoder) SetTagName(tagName string) {
+ e.tagName = tagName
+}
+
+// SetMode sets the mode the encoder should run
+// Default is ModeImplicit
+func (e *Encoder) SetMode(mode Mode) {
+ e.mode = mode
+}
+
+// SetAnonymousMode sets the mode the encoder should run
+// Default is AnonymousEmbed
+func (e *Encoder) SetAnonymousMode(mode AnonymousMode) {
+ e.embedAnonymous = mode == AnonymousEmbed
+}
+
+// RegisterTagNameFunc registers a custom tag name parser function
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: once a custom function has been registered the default, or custom set, tag name is ignored
+// and the function is relied upon 100% for the name data. The return value WILL BE CACHED and so the return
+// value must be consistent.
+func (e *Encoder) RegisterTagNameFunc(fn TagNameFunc) {
+ e.structCache.tagFn = fn
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any parsing
+func (e *Encoder) RegisterCustomTypeFunc(fn EncodeCustomTypeFunc, types ...interface{}) {
+
+ if e.customTypeFuncs == nil {
+ e.customTypeFuncs = map[reflect.Type]EncodeCustomTypeFunc{}
+ }
+
+ for _, t := range types {
+ e.customTypeFuncs[reflect.TypeOf(t)] = fn
+ }
+}
+
+// Encode encodes the given struct/type values into the corresponding url.Values
+func (e *Encoder) Encode(v interface{}) (values url.Values, err error) {
+
+ val, kind := ExtractType(reflect.ValueOf(v))
+
+ if kind == reflect.Ptr || kind == reflect.Interface || kind == reflect.Invalid {
+ return nil, &InvalidEncodeError{reflect.TypeOf(v)}
+ }
+
+ enc := e.dataPool.Get().(*encoder)
+ enc.values = make(url.Values)
+
+ if kind == reflect.Struct && val.Type() != timeType {
+ enc.traverseStruct(val, enc.namespace[0:0], -1)
+ } else {
+ enc.setFieldByType(val, enc.namespace[0:0], -1, false)
+ }
+
+ if len(enc.errs) > 0 {
+ err = enc.errs
+ enc.errs = nil
+ }
+
+ values = enc.values
+
+ e.dataPool.Put(enc)
+
+ return
+}
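And the encoding direction, matching the Encoder API above; the `User`/`Address` types are illustrative and the printed form is approximate:

```go
package main

import (
	"fmt"

	"github.com/go-playground/form/v4"
)

type Address struct {
	City string
}

type User struct {
	Name      string `form:"name"`
	Age       uint8
	Addresses []Address
}

func main() {
	enc := form.NewEncoder()

	values, err := enc.Encode(User{
		Name:      "joeybloggs",
		Age:       3,
		Addresses: []Address{{City: "Toronto"}},
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// roughly: map[Addresses[0].City:[Toronto] Age:[3] name:[joeybloggs]]
	fmt.Println(values)
}
```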
diff --git a/vendor/github.com/go-playground/form/v4/go.mod b/vendor/github.com/go-playground/form/v4/go.mod
new file mode 100644
index 0000000..f88a222
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-playground/form/v4
+
+go 1.13
+
+require github.com/go-playground/assert/v2 v2.0.1
diff --git a/vendor/github.com/go-playground/form/v4/go.sum b/vendor/github.com/go-playground/form/v4/go.sum
new file mode 100644
index 0000000..69d5932
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/go.sum
@@ -0,0 +1,2 @@
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
diff --git a/vendor/github.com/go-playground/form/v4/logo.jpg b/vendor/github.com/go-playground/form/v4/logo.jpg
new file mode 100644
index 0000000..2ef34f8
Binary files /dev/null and b/vendor/github.com/go-playground/form/v4/logo.jpg differ
diff --git a/vendor/github.com/go-playground/form/v4/util.go b/vendor/github.com/go-playground/form/v4/util.go
new file mode 100644
index 0000000..13db877
--- /dev/null
+++ b/vendor/github.com/go-playground/form/v4/util.go
@@ -0,0 +1,56 @@
+package form
+
+import (
+ "reflect"
+ "strconv"
+)
+
+// ExtractType gets the actual underlying type of field value.
+// it is exposed for use within your Custom Functions
+func ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) {
+
+ switch current.Kind() {
+ case reflect.Ptr:
+
+ if current.IsNil() {
+ return current, reflect.Ptr
+ }
+
+ return ExtractType(current.Elem())
+
+ case reflect.Interface:
+
+ if current.IsNil() {
+ return current, reflect.Interface
+ }
+
+ return ExtractType(current.Elem())
+
+ default:
+ return current, current.Kind()
+ }
+}
+
+func parseBool(str string) (bool, error) {
+
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "on", "yes", "ok":
+ return true, nil
+ case "", "0", "f", "F", "false", "FALSE", "False", "off", "no":
+ return false, nil
+ }
+
+ // strconv.NumError mimicking exactly the strconv.ParseBool(..) error and type
+ // to ensure compatibility with the std library and beyond.
+ return false, &strconv.NumError{Func: "ParseBool", Num: str, Err: strconv.ErrSyntax}
+}
+
+// hasValue reports whether a reflect.Value holds something other than its default (zero) value
+func hasValue(field reflect.Value) bool {
+ switch field.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return !field.IsNil()
+ default:
+ return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+ }
+}
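ExtractType is the one exported helper here; it simply unwraps pointers and interfaces down to the concrete value, which is handy inside custom type functions. A short sketch of both branches:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/go-playground/form/v4"
)

func main() {
	name := "gopher"
	ptr := &name

	// A non-nil pointer is unwrapped to its element.
	v, kind := form.ExtractType(reflect.ValueOf(ptr))
	fmt.Println(v.String(), kind) // gopher string

	// A nil pointer is returned as-is with kind reflect.Ptr.
	var nilPtr *string
	_, kind = form.ExtractType(reflect.ValueOf(nilPtr))
	fmt.Println(kind) // ptr
}
```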
diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml
new file mode 100644
index 0000000..d50237a
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+go:
+ - 1.13.1
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE
new file mode 100644
index 0000000..75854ac
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md
new file mode 100644
index 0000000..ba1b068
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/README.md
@@ -0,0 +1,172 @@
+## locales
+

+[](https://travis-ci.org/go-playground/locales)
+[](https://goreportcard.com/report/github.com/go-playground/locales)
+[](https://godoc.org/github.com/go-playground/locales)
+
+[](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
+an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
+
+Features
+--------
+- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1
+- [x] Contains Cardinal, Ordinal and Range Plural Rules
+- [x] Contains Month, Weekday and Timezone translations built in
+- [x] Contains Date & Time formatting functions
+- [x] Contains Number, Currency, Accounting and Percent formatting functions
+- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
+
+Full Tests
+--------------------
+I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment.
+Any help would be **greatly appreciated!!!!** Please see [issue](https://github.com/go-playground/locales/issues/1) for details.
+
+Installation
+-----------
+
+Use go get
+
+```shell
+go get github.com/go-playground/locales
+```
+
+NOTES
+--------
+You'll notice most return types are []byte; this is because most of the time the results will be concatenated with a larger body
+of text, which can avoid some allocations if you are already appending to a byte slice. Otherwise just cast to string.
+
+Usage
+-------
+```go
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-playground/locales/currency"
+ "github.com/go-playground/locales/en_CA"
+)
+
+func main() {
+
+ loc, _ := time.LoadLocation("America/Toronto")
+ datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
+
+ l := en_CA.New()
+
+ // Dates
+ fmt.Println(l.FmtDateFull(datetime))
+ fmt.Println(l.FmtDateLong(datetime))
+ fmt.Println(l.FmtDateMedium(datetime))
+ fmt.Println(l.FmtDateShort(datetime))
+
+ // Times
+ fmt.Println(l.FmtTimeFull(datetime))
+ fmt.Println(l.FmtTimeLong(datetime))
+ fmt.Println(l.FmtTimeMedium(datetime))
+ fmt.Println(l.FmtTimeShort(datetime))
+
+ // Months Wide
+ fmt.Println(l.MonthWide(time.January))
+ fmt.Println(l.MonthWide(time.February))
+ fmt.Println(l.MonthWide(time.March))
+ // ...
+
+ // Months Abbreviated
+ fmt.Println(l.MonthAbbreviated(time.January))
+ fmt.Println(l.MonthAbbreviated(time.February))
+ fmt.Println(l.MonthAbbreviated(time.March))
+ // ...
+
+ // Months Narrow
+ fmt.Println(l.MonthNarrow(time.January))
+ fmt.Println(l.MonthNarrow(time.February))
+ fmt.Println(l.MonthNarrow(time.March))
+ // ...
+
+ // Weekdays Wide
+ fmt.Println(l.WeekdayWide(time.Sunday))
+ fmt.Println(l.WeekdayWide(time.Monday))
+ fmt.Println(l.WeekdayWide(time.Tuesday))
+ // ...
+
+ // Weekdays Abbreviated
+ fmt.Println(l.WeekdayAbbreviated(time.Sunday))
+ fmt.Println(l.WeekdayAbbreviated(time.Monday))
+ fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
+ // ...
+
+ // Weekdays Short
+ fmt.Println(l.WeekdayShort(time.Sunday))
+ fmt.Println(l.WeekdayShort(time.Monday))
+ fmt.Println(l.WeekdayShort(time.Tuesday))
+ // ...
+
+ // Weekdays Narrow
+ fmt.Println(l.WeekdayNarrow(time.Sunday))
+ fmt.Println(l.WeekdayNarrow(time.Monday))
+ fmt.Println(l.WeekdayNarrow(time.Tuesday))
+ // ...
+
+ var f64 float64
+
+ f64 = -10356.4523
+
+ // Number
+ fmt.Println(l.FmtNumber(f64, 2))
+
+ // Currency
+ fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
+ fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
+
+ // Accounting
+ fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
+ fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
+
+ f64 = 78.12
+
+ // Percent
+ fmt.Println(l.FmtPercent(f64, 0))
+
+ // Plural Rules for locale, so you know what rules you must cover
+ fmt.Println(l.PluralsCardinal())
+ fmt.Println(l.PluralsOrdinal())
+
+ // Cardinal Plural Rules
+ fmt.Println(l.CardinalPluralRule(1, 0))
+ fmt.Println(l.CardinalPluralRule(1.0, 0))
+ fmt.Println(l.CardinalPluralRule(1.0, 1))
+ fmt.Println(l.CardinalPluralRule(3, 0))
+
+ // Ordinal Plural Rules
+ fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
+ fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
+ fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
+ fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
+
+ // Range Plural Rules
+ fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
+ fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
+ fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
+}
+```
+
+NOTES:
+-------
+These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
+I strongly encourage contributing to the CLDR project to get the locale information corrected; the next time
+these locales are regenerated, the fix will come along.
+
+I do however realize that time constraints are often important and so there are two options:
+
+1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface.
+2. Add an exception in the locale generation code directly; once regenerated, the fix will be in place.
+
+Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go
new file mode 100644
index 0000000..cdaba59
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/currency/currency.go
@@ -0,0 +1,308 @@
+package currency
+
+// Type is the currency type associated with the locales currency enum
+type Type int
+
+// locale currencies
+const (
+ ADP Type = iota
+ AED
+ AFA
+ AFN
+ ALK
+ ALL
+ AMD
+ ANG
+ AOA
+ AOK
+ AON
+ AOR
+ ARA
+ ARL
+ ARM
+ ARP
+ ARS
+ ATS
+ AUD
+ AWG
+ AZM
+ AZN
+ BAD
+ BAM
+ BAN
+ BBD
+ BDT
+ BEC
+ BEF
+ BEL
+ BGL
+ BGM
+ BGN
+ BGO
+ BHD
+ BIF
+ BMD
+ BND
+ BOB
+ BOL
+ BOP
+ BOV
+ BRB
+ BRC
+ BRE
+ BRL
+ BRN
+ BRR
+ BRZ
+ BSD
+ BTN
+ BUK
+ BWP
+ BYB
+ BYN
+ BYR
+ BZD
+ CAD
+ CDF
+ CHE
+ CHF
+ CHW
+ CLE
+ CLF
+ CLP
+ CNH
+ CNX
+ CNY
+ COP
+ COU
+ CRC
+ CSD
+ CSK
+ CUC
+ CUP
+ CVE
+ CYP
+ CZK
+ DDM
+ DEM
+ DJF
+ DKK
+ DOP
+ DZD
+ ECS
+ ECV
+ EEK
+ EGP
+ ERN
+ ESA
+ ESB
+ ESP
+ ETB
+ EUR
+ FIM
+ FJD
+ FKP
+ FRF
+ GBP
+ GEK
+ GEL
+ GHC
+ GHS
+ GIP
+ GMD
+ GNF
+ GNS
+ GQE
+ GRD
+ GTQ
+ GWE
+ GWP
+ GYD
+ HKD
+ HNL
+ HRD
+ HRK
+ HTG
+ HUF
+ IDR
+ IEP
+ ILP
+ ILR
+ ILS
+ INR
+ IQD
+ IRR
+ ISJ
+ ISK
+ ITL
+ JMD
+ JOD
+ JPY
+ KES
+ KGS
+ KHR
+ KMF
+ KPW
+ KRH
+ KRO
+ KRW
+ KWD
+ KYD
+ KZT
+ LAK
+ LBP
+ LKR
+ LRD
+ LSL
+ LTL
+ LTT
+ LUC
+ LUF
+ LUL
+ LVL
+ LVR
+ LYD
+ MAD
+ MAF
+ MCF
+ MDC
+ MDL
+ MGA
+ MGF
+ MKD
+ MKN
+ MLF
+ MMK
+ MNT
+ MOP
+ MRO
+ MTL
+ MTP
+ MUR
+ MVP
+ MVR
+ MWK
+ MXN
+ MXP
+ MXV
+ MYR
+ MZE
+ MZM
+ MZN
+ NAD
+ NGN
+ NIC
+ NIO
+ NLG
+ NOK
+ NPR
+ NZD
+ OMR
+ PAB
+ PEI
+ PEN
+ PES
+ PGK
+ PHP
+ PKR
+ PLN
+ PLZ
+ PTE
+ PYG
+ QAR
+ RHD
+ ROL
+ RON
+ RSD
+ RUB
+ RUR
+ RWF
+ SAR
+ SBD
+ SCR
+ SDD
+ SDG
+ SDP
+ SEK
+ SGD
+ SHP
+ SIT
+ SKK
+ SLL
+ SOS
+ SRD
+ SRG
+ SSP
+ STD
+ STN
+ SUR
+ SVC
+ SYP
+ SZL
+ THB
+ TJR
+ TJS
+ TMM
+ TMT
+ TND
+ TOP
+ TPE
+ TRL
+ TRY
+ TTD
+ TWD
+ TZS
+ UAH
+ UAK
+ UGS
+ UGX
+ USD
+ USN
+ USS
+ UYI
+ UYP
+ UYU
+ UZS
+ VEB
+ VEF
+ VND
+ VNN
+ VUV
+ WST
+ XAF
+ XAG
+ XAU
+ XBA
+ XBB
+ XBC
+ XBD
+ XCD
+ XDR
+ XEU
+ XFO
+ XFU
+ XOF
+ XPD
+ XPF
+ XPT
+ XRE
+ XSU
+ XTS
+ XUA
+ XXX
+ YDD
+ YER
+ YUD
+ YUM
+ YUN
+ YUR
+ ZAL
+ ZAR
+ ZMK
+ ZMW
+ ZRN
+ ZRZ
+ ZWD
+ ZWL
+ ZWR
+)
diff --git a/vendor/github.com/go-playground/locales/go.mod b/vendor/github.com/go-playground/locales/go.mod
new file mode 100644
index 0000000..34ab6f2
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-playground/locales
+
+go 1.13
+
+require golang.org/x/text v0.3.2
diff --git a/vendor/github.com/go-playground/locales/go.sum b/vendor/github.com/go-playground/locales/go.sum
new file mode 100644
index 0000000..63c9200
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/go.sum
@@ -0,0 +1,3 @@
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png
new file mode 100644
index 0000000..3038276
Binary files /dev/null and b/vendor/github.com/go-playground/locales/logo.png differ
diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go
new file mode 100644
index 0000000..9202900
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/rules.go
@@ -0,0 +1,293 @@
+package locales
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/go-playground/locales/currency"
+)
+
+// // ErrBadNumberValue is returned when the number passed for
+// // plural rule determination cannot be parsed
+// type ErrBadNumberValue struct {
+// NumberValue string
+// InnerError error
+// }
+
+// // Error returns ErrBadNumberValue error string
+// func (e *ErrBadNumberValue) Error() string {
+// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
+// }
+
+// var _ error = new(ErrBadNumberValue)
+
+// PluralRule denotes the type of plural rules
+type PluralRule int
+
+// PluralRule's
+const (
+ PluralRuleUnknown PluralRule = iota
+ PluralRuleZero // zero
+ PluralRuleOne // one - singular
+ PluralRuleTwo // two - dual
+ PluralRuleFew // few - paucal
+ PluralRuleMany // many - also used for fractions if they have a separate class
+ PluralRuleOther // other - required—general plural form—also used if the language only has a single form
+)
+
+const (
+ pluralsString = "UnknownZeroOneTwoFewManyOther"
+)
+
+// Translator encapsulates an instance of a locale
+// NOTE: some values are returned as a []byte just in case the caller
+// wishes to add more and can help avoid allocations; otherwise just cast as string
+type Translator interface {
+
+ // The following Functions are for overriding, debugging or developing
+ // with a Translator Locale
+
+ // Locale returns the string value of the translator
+ Locale() string
+
+ // returns an array of cardinal plural rules associated
+ // with this translator
+ PluralsCardinal() []PluralRule
+
+ // returns an array of ordinal plural rules associated
+ // with this translator
+ PluralsOrdinal() []PluralRule
+
+ // returns an array of range plural rules associated
+ // with this translator
+ PluralsRange() []PluralRule
+
+ // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
+ CardinalPluralRule(num float64, v uint64) PluralRule
+
+ // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
+ OrdinalPluralRule(num float64, v uint64) PluralRule
+
+ // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
+ RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
+
+ // returns the locales abbreviated month given the 'month' provided
+ MonthAbbreviated(month time.Month) string
+
+ // returns the locales abbreviated months
+ MonthsAbbreviated() []string
+
+ // returns the locales narrow month given the 'month' provided
+ MonthNarrow(month time.Month) string
+
+ // returns the locales narrow months
+ MonthsNarrow() []string
+
+ // returns the locales wide month given the 'month' provided
+ MonthWide(month time.Month) string
+
+ // returns the locales wide months
+ MonthsWide() []string
+
+ // returns the locales abbreviated weekday given the 'weekday' provided
+ WeekdayAbbreviated(weekday time.Weekday) string
+
+ // returns the locales abbreviated weekdays
+ WeekdaysAbbreviated() []string
+
+ // returns the locales narrow weekday given the 'weekday' provided
+ WeekdayNarrow(weekday time.Weekday) string
+
+ // returns the locales narrow weekdays
+ WeekdaysNarrow() []string
+
+ // returns the locales short weekday given the 'weekday' provided
+ WeekdayShort(weekday time.Weekday) string
+
+ // returns the locales short weekdays
+ WeekdaysShort() []string
+
+ // returns the locales wide weekday given the 'weekday' provided
+ WeekdayWide(weekday time.Weekday) string
+
+ // returns the locales wide weekdays
+ WeekdaysWide() []string
+
+ // The following Functions are common Formatting functions for the Translator's Locale
+
+ // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+ FmtNumber(num float64, v uint64) string
+
+ // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+ // NOTE: 'num' passed into FmtPercent is assumed to be in percent already
+ FmtPercent(num float64, v uint64) string
+
+ // returns the currency representation of 'num' with digits/precision of 'v' for locale
+ FmtCurrency(num float64, v uint64, currency currency.Type) string
+
+ // returns the currency representation of 'num' with digits/precision of 'v' for locale
+ // in accounting notation.
+ FmtAccounting(num float64, v uint64, currency currency.Type) string
+
+ // returns the short date representation of 't' for locale
+ FmtDateShort(t time.Time) string
+
+ // returns the medium date representation of 't' for locale
+ FmtDateMedium(t time.Time) string
+
+ // returns the long date representation of 't' for locale
+ FmtDateLong(t time.Time) string
+
+ // returns the full date representation of 't' for locale
+ FmtDateFull(t time.Time) string
+
+ // returns the short time representation of 't' for locale
+ FmtTimeShort(t time.Time) string
+
+ // returns the medium time representation of 't' for locale
+ FmtTimeMedium(t time.Time) string
+
+ // returns the long time representation of 't' for locale
+ FmtTimeLong(t time.Time) string
+
+ // returns the full time representation of 't' for locale
+ FmtTimeFull(t time.Time) string
+}
+
+// String returns the string value of PluralRule
+func (p PluralRule) String() string {
+
+ switch p {
+ case PluralRuleZero:
+ return pluralsString[7:11]
+ case PluralRuleOne:
+ return pluralsString[11:14]
+ case PluralRuleTwo:
+ return pluralsString[14:17]
+ case PluralRuleFew:
+ return pluralsString[17:20]
+ case PluralRuleMany:
+ return pluralsString[20:24]
+ case PluralRuleOther:
+ return pluralsString[24:]
+ default:
+ return pluralsString[:7]
+ }
+}
+
+//
+// Precision Notes:
+//
+// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
+//
+// v := float64(3.141)
+// i := float64(int64(v))
+//
+// fmt.Println(v - i)
+//
+// or
+//
+// s := strconv.FormatFloat(v-i, 'f', -1, 64)
+// fmt.Println(s)
+//
+// these will not print what you'd expect: 0.14100000000000001
+// and so this library requires a precision to be specified, or
+// inaccurate plural rules could be applied.
+//
+//
+//
+// n - absolute value of the source number (integer and decimals).
+// i - integer digits of n.
+// v - number of visible fraction digits in n, with trailing zeros.
+// w - number of visible fraction digits in n, without trailing zeros.
+// f - visible fractional digits in n, with trailing zeros.
+// t - visible fractional digits in n, without trailing zeros.
+//
+//
+// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision notes above.
+//
+// n := math.Abs(num)
+// i := int64(n)
+// v := v
+//
+//
+// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's....
+// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
+// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's....
+//
+//
+//
+// General Inclusion Rules
+// - v will always be available inherently
+// - all require n
+// - w requires i
+//
+
+// W returns the number of visible fraction digits in N, without trailing zeros.
+func W(n float64, v uint64) (w int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+ // will either be '0' or '0.xxxx', so if the length is 1 then w will be zero,
+ // otherwise we need to parse
+ if len(s) != 1 {
+
+ s = s[2:]
+ end := len(s) + 1
+
+ for i := end; i >= 0; i-- {
+ if s[i] != '0' {
+ end = i + 1
+ break
+ }
+ }
+
+ w = int64(len(s[:end]))
+ }
+
+ return
+}
+
+// F returns the visible fractional digits in N, with trailing zeros.
+func F(n float64, v uint64) (f int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+ // will either be '0' or '0.xxxx', so if the length is 1 then f will be zero,
+ // otherwise we need to parse
+ if len(s) != 1 {
+
+ // ignoring error, because it can't fail as we generated
+ // the string internally from a real number
+ f, _ = strconv.ParseInt(s[2:], 10, 64)
+ }
+
+ return
+}
+
+// T returns the visible fractional digits in N, without trailing zeros.
+func T(n float64, v uint64) (t int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+ // will either be '0' or '0.xxxx', so if the length is 1 then t will be zero,
+ // otherwise we need to parse
+ if len(s) != 1 {
+
+ s = s[2:]
+ end := len(s) + 1
+
+ for i := end; i >= 0; i-- {
+ if s[i] != '0' {
+ end = i + 1
+ break
+ }
+ }
+
+ // ignoring error, because it can't fail as we generated
+ // the string internally from a real number
+ t, _ = strconv.ParseInt(s[:end], 10, 64)
+ }
+
+ return
+}
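To make the operand definitions in the comment block above concrete, here is a small worked example for num = 1.230 with a precision of v = 3, calling only the exported F helper; the remaining operand values follow directly from the definitions themselves:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
)

func main() {
	// For num = 1.230 with a stated precision of v = 3, the CLDR operands are:
	//   n = 1.230  absolute value of the source number
	//   i = 1      integer digits of n
	//   v = 3      visible fraction digits, with trailing zeros
	//   w = 2      visible fraction digits, without trailing zeros
	//   f = 230    visible fraction digits as an integer, with trailing zeros
	//   t = 23     visible fraction digits as an integer, without trailing zeros
	fmt.Println(locales.F(1.230, 3)) // 230
}
```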
diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore
new file mode 100644
index 0000000..bc4e07f
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.coverprofile
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml
new file mode 100644
index 0000000..39b8b92
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/.travis.yml
@@ -0,0 +1,27 @@
+language: go
+go:
+ - 1.13.4
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ [ $TRAVIS_GO_VERSION = 1.13.4 ] &&
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE
new file mode 100644
index 0000000..8d8aba1
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md
new file mode 100644
index 0000000..071f33a
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/README.md
@@ -0,0 +1,89 @@
+## universal-translator
+

+[](https://travis-ci.org/go-playground/universal-translator)
+[](https://coveralls.io/github/go-playground/universal-translator)
+[](https://goreportcard.com/report/github.com/go-playground/universal-translator)
+[](https://godoc.org/github.com/go-playground/universal-translator)
+
+[](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
+
+Why another i18n library?
+--------------------------
+Because none of the plural rules out there seem to be correct, including the previous implementation of this package,
+I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
+is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for
+use in your applications.
+
+Features
+--------
+- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3
+- [x] Contains Cardinal, Ordinal and Range Plural Rules
+- [x] Contains Month, Weekday and Timezone translations built in
+- [x] Contains Date & Time formatting functions
+- [x] Contains Number, Currency, Accounting and Percent formatting functions
+- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
+- [x] Support loading translations from files
+- [x] Exporting translations to file(s), mainly for getting them professionally translated
+- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated
+- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
+
+Installation
+-----------
+
+Use go get
+
+```shell
+go get github.com/go-playground/universal-translator
+```
+
+Usage & Documentation
+-------
+
+Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs
+
+##### Examples:
+
+- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic)
+- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files)
+- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files)
+
+File formatting
+--------------
+All translation types (plain substitution, Cardinal, Ordinal and Range) can be contained within the same file(s);
+they are only separated here for easy viewing.
+
+##### Examples:
+
+- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
+
+##### Basic Makeup
+NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
+```json
+{
+ "locale": "en",
+ "key": "days-left",
+ "trans": "You have {0} day left.",
+ "type": "Cardinal",
+ "rule": "One",
+ "override": false
+}
+```
+|Field|Description|
+|---|---|
+|locale|The locale for which the translation is for.|
+|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.|
+|trans|The actual translation text.|
+|type|The type of translation: Cardinal, Ordinal, Range, or "" for a plain substitution (not required to be defined if plain is used)|
+|rule|The plural rule the translation is for, eg. One, Two, Few, Many or Other (not required to be defined if plain is used)|
+|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
+
+Help With Tests
+---------------
+To anyone interested in helping or contributing, I sure could use some help creating tests for each language.
+Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go
new file mode 100644
index 0000000..38b163b
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/errors.go
@@ -0,0 +1,148 @@
+package ut
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/go-playground/locales"
+)
+
+var (
+ // ErrUnknowTranslation indicates the translation could not be found
+ ErrUnknowTranslation = errors.New("Unknown Translation")
+)
+
+var _ error = new(ErrConflictingTranslation)
+var _ error = new(ErrRangeTranslation)
+var _ error = new(ErrOrdinalTranslation)
+var _ error = new(ErrCardinalTranslation)
+var _ error = new(ErrMissingPluralTranslation)
+var _ error = new(ErrExistingTranslator)
+
+// ErrExistingTranslator is the error representing a conflicting translator
+type ErrExistingTranslator struct {
+ locale string
+}
+
+// Error returns ErrExistingTranslator's internal error text
+func (e *ErrExistingTranslator) Error() string {
+ return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
+}
+
+// ErrConflictingTranslation is the error representing a conflicting translation
+type ErrConflictingTranslation struct {
+ locale string
+ key interface{}
+ rule locales.PluralRule
+ text string
+}
+
+// Error returns ErrConflictingTranslation's internal error text
+func (e *ErrConflictingTranslation) Error() string {
+
+ if _, ok := e.key.(string); !ok {
+ return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
+ }
+
+ return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
+}
+
+// ErrRangeTranslation is the error representing a range translation error
+type ErrRangeTranslation struct {
+ text string
+}
+
+// Error returns ErrRangeTranslation's internal error text
+func (e *ErrRangeTranslation) Error() string {
+ return e.text
+}
+
+// ErrOrdinalTranslation is the error representing an ordinal translation error
+type ErrOrdinalTranslation struct {
+ text string
+}
+
+// Error returns ErrOrdinalTranslation's internal error text
+func (e *ErrOrdinalTranslation) Error() string {
+ return e.text
+}
+
+// ErrCardinalTranslation is the error representing a cardinal translation error
+type ErrCardinalTranslation struct {
+ text string
+}
+
+// Error returns ErrCardinalTranslation's internal error text
+func (e *ErrCardinalTranslation) Error() string {
+ return e.text
+}
+
+// ErrMissingPluralTranslation is the error signifying a missing translation given
+// the locales plural rules.
+type ErrMissingPluralTranslation struct {
+ locale string
+ key interface{}
+ rule locales.PluralRule
+ translationType string
+}
+
+// Error returns ErrMissingPluralTranslation's internal error text
+func (e *ErrMissingPluralTranslation) Error() string {
+
+ if _, ok := e.key.(string); !ok {
+ return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
+ }
+
+ return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
+}
+
+// ErrMissingBracket is the error representing a missing bracket in a translation
+// eg. This is a {0 <-- missing ending '}'
+type ErrMissingBracket struct {
+ locale string
+ key interface{}
+ text string
+}
+
+// Error returns ErrMissingBracket error message
+func (e *ErrMissingBracket) Error() string {
+ return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
+}
+
+// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
+// eg. This is a {must-be-int}
+type ErrBadParamSyntax struct {
+ locale string
+ param string
+ key interface{}
+ text string
+}
+
+// Error returns ErrBadParamSyntax error message
+func (e *ErrBadParamSyntax) Error() string {
+ return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
+}
+
+// import/export errors
+
+// ErrMissingLocale is the error representing an expected locale that could
+// not be found, i.e. a locale not registered with the UniversalTranslator instance
+type ErrMissingLocale struct {
+ locale string
+}
+
+// Error returns ErrMissingLocale's internal error text
+func (e *ErrMissingLocale) Error() string {
+ return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
+}
+
+// ErrBadPluralDefinition is the error representing an incorrect plural definition
+// usually found within translations defined within files during the import process.
+type ErrBadPluralDefinition struct {
+ tl translation
+}
+
+// Error returns ErrBadPluralDefinition's internal error text
+func (e *ErrBadPluralDefinition) Error() string {
+ return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
+}
diff --git a/vendor/github.com/go-playground/universal-translator/go.mod b/vendor/github.com/go-playground/universal-translator/go.mod
new file mode 100644
index 0000000..8079590
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-playground/universal-translator
+
+go 1.13
+
+require github.com/go-playground/locales v0.13.0
diff --git a/vendor/github.com/go-playground/universal-translator/go.sum b/vendor/github.com/go-playground/universal-translator/go.sum
new file mode 100644
index 0000000..cbbf324
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/go.sum
@@ -0,0 +1,4 @@
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go
new file mode 100644
index 0000000..7bd76f2
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/import_export.go
@@ -0,0 +1,274 @@
+package ut
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "io"
+
+ "github.com/go-playground/locales"
+)
+
+type translation struct {
+ Locale string `json:"locale"`
+ Key interface{} `json:"key"` // either string or integer
+ Translation string `json:"trans"`
+ PluralType string `json:"type,omitempty"`
+ PluralRule string `json:"rule,omitempty"`
+ OverrideExisting bool `json:"override,omitempty"`
+}
+
+const (
+ cardinalType = "Cardinal"
+ ordinalType = "Ordinal"
+ rangeType = "Range"
+)
+
+// ImportExportFormat is the format of the file import or export
+type ImportExportFormat uint8
+
+// supported Export Formats
+const (
+ FormatJSON ImportExportFormat = iota
+)
+
+// Export writes the translations out to a file on disk.
+//
+// NOTE: this currently only works with string or int translation keys.
+func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
+
+ _, err := os.Stat(dirname)
+ fmt.Println(dirname, err, os.IsNotExist(err))
+ if err != nil {
+
+ if !os.IsNotExist(err) {
+ return err
+ }
+
+ if err = os.MkdirAll(dirname, 0744); err != nil {
+ return err
+ }
+ }
+
+ // build up translations
+ var trans []translation
+ var b []byte
+ var ext string
+
+ for _, locale := range t.translators {
+
+ for k, v := range locale.(*translator).translations {
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k,
+ Translation: v.text,
+ })
+ }
+
+ for k, pluralTrans := range locale.(*translator).cardinalTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: cardinalType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ for k, pluralTrans := range locale.(*translator).ordinalTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: ordinalType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ for k, pluralTrans := range locale.(*translator).rangeTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: rangeType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ switch format {
+ case FormatJSON:
+ b, err = json.MarshalIndent(trans, "", " ")
+ ext = ".json"
+ }
+
+ if err != nil {
+ return err
+ }
+
+ err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
+ if err != nil {
+ return err
+ }
+
+ trans = trans[0:0]
+ }
+
+ return nil
+}
+
+// Import reads the translations out of a file or directory on disk.
+//
+// NOTE: this currently only works with string or int translation keys.
+func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
+
+ fi, err := os.Stat(dirnameOrFilename)
+ if err != nil {
+ return err
+ }
+
+ processFn := func(filename string) error {
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return t.ImportByReader(format, f)
+ }
+
+ if !fi.IsDir() {
+ return processFn(dirnameOrFilename)
+ }
+
+ // recursively go through directory
+ walker := func(path string, info os.FileInfo, err error) error {
+
+ if info.IsDir() {
+ return nil
+ }
+
+ switch format {
+ case FormatJSON:
+ // skip non JSON files
+ if filepath.Ext(info.Name()) != ".json" {
+ return nil
+ }
+ }
+
+ return processFn(path)
+ }
+
+ return filepath.Walk(dirnameOrFilename, walker)
+}
+
+// ImportByReader imports the translations found within the contents read from the supplied reader.
+//
+// NOTE: generally used when assets have been embedded into the binary and are already in memory.
+func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
+
+ b, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+
+ var trans []translation
+
+ switch format {
+ case FormatJSON:
+ err = json.Unmarshal(b, &trans)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ for _, tl := range trans {
+
+ locale, found := t.FindTranslator(tl.Locale)
+ if !found {
+ return &ErrMissingLocale{locale: tl.Locale}
+ }
+
+ pr := stringToPR(tl.PluralRule)
+
+ if pr == locales.PluralRuleUnknown {
+
+ err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
+ if err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ switch tl.PluralType {
+ case cardinalType:
+ err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ case ordinalType:
+ err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ case rangeType:
+ err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ default:
+ return &ErrBadPluralDefinition{tl: tl}
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func stringToPR(s string) locales.PluralRule {
+
+ switch s {
+ case "One":
+ return locales.PluralRuleOne
+ case "Two":
+ return locales.PluralRuleTwo
+ case "Few":
+ return locales.PluralRuleFew
+ case "Many":
+ return locales.PluralRuleMany
+ case "Other":
+ return locales.PluralRuleOther
+ default:
+ return locales.PluralRuleUnknown
+ }
+
+}
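A hedged round-trip sketch of Export/Import; `ut.New` and `GetTranslator` are assumed from the rest of the universal-translator package (they are not part of this file), and the directory name is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	e := en.New()
	uni := ut.New(e, e) // fallback locale + supported locales

	trans, _ := uni.GetTranslator("en")
	_ = trans.Add("welcome", "Welcome {0}!", false)

	// One JSON file per registered locale is written to the directory.
	if err := uni.Export(ut.FormatJSON, "translations"); err != nil {
		fmt.Println(err)
		return
	}

	// Reading them back, typically at startup; a fresh translator instance is
	// used here so the import does not conflict with the key added above.
	fresh := ut.New(en.New(), en.New())
	if err := fresh.Import(ut.FormatJSON, "translations"); err != nil {
		fmt.Println(err)
	}
}
```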
diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png
new file mode 100644
index 0000000..a37aa8c
Binary files /dev/null and b/vendor/github.com/go-playground/universal-translator/logo.png differ
diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go
new file mode 100644
index 0000000..cfafce8
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/translator.go
@@ -0,0 +1,420 @@
+package ut
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/go-playground/locales"
+)
+
+const (
+ paramZero = "{0}"
+ paramOne = "{1}"
+ unknownTranslation = ""
+)
+
+// Translator is the universal translator's
+// translator instance, a thin wrapper
+// around a locales.Translator instance
+// providing some extra functionality
+type Translator interface {
+ locales.Translator
+
+ // adds a normal translation for a particular language/locale
+ // {#} is the only replacement type accepted and may be used ad infinitum
+ // eg. one: '{0} day left' other: '{0} days left'
+ Add(key interface{}, text string, override bool) error
+
+ // adds a cardinal plural translation for a particular language/locale
+ // {0} is the only replacement type accepted and only one variable is accepted as
+ // multiple cannot be used for a plural rule determination, unless it is a range;
+ // see AddRange below.
+ // eg. in locale 'en' one: '{0} day left' other: '{0} days left'
+ AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // adds an ordinal plural translation for a particular language/locale
+ // {0} is the only replacement type accepted and only one variable is accepted as
+ // multiple cannot be used for a plural rule determination, unless it is a range;
+ // see AddRange below.
+ // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
+ // - 1st, 2nd, 3rd...
+ AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // adds a range plural translation for a particular language/locale
+ // {0} and {1} are the only replacement types accepted.
+ // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
+ AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // creates the translation for the locale given the 'key' and params passed in
+ T(key interface{}, params ...string) (string, error)
+
+ // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
+ // and param passed in
+ C(key interface{}, num float64, digits uint64, param string) (string, error)
+
+ // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
+ // and param passed in
+ O(key interface{}, num float64, digits uint64, param string) (string, error)
+
+ // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
+ // 'digit2' arguments and 'param1' and 'param2' passed in
+ R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
+
+ // VerifyTranslations checks to ensure that no plural rules have been
+ // missed within the translations.
+ VerifyTranslations() error
+}
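A compact sketch of the Add/AddCardinal and T/C flow the interface above describes; as before, `ut.New` and `GetTranslator` are assumptions taken from the surrounding package rather than this file:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	e := en.New()
	uni := ut.New(e, e)
	trans, _ := uni.GetTranslator("en")

	// Plain substitution: {0}, {1}, ... ad infinitum.
	_ = trans.Add("welcome", "Welcome {0} to {1}!", false)
	msg, _ := trans.T("welcome", "Dean", "Toronto")
	fmt.Println(msg) // Welcome Dean to Toronto!

	// Cardinal plurals: one entry per plural rule the locale supports.
	_ = trans.AddCardinal("days-left", "You have {0} day left.", locales.PluralRuleOne, false)
	_ = trans.AddCardinal("days-left", "You have {0} days left.", locales.PluralRuleOther, false)

	msg, _ = trans.C("days-left", 1, 0, "1")
	fmt.Println(msg) // You have 1 day left.

	msg, _ = trans.C("days-left", 3, 0, "3")
	fmt.Println(msg) // You have 3 days left.
}
```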
+
+var _ Translator = new(translator)
+var _ locales.Translator = new(translator)
+
+type translator struct {
+ locales.Translator
+ translations map[interface{}]*transText
+ cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
+ ordinalTanslations map[interface{}][]*transText
+ rangeTanslations map[interface{}][]*transText
+}
+
+type transText struct {
+ text string
+ indexes []int
+}
+
+func newTranslator(trans locales.Translator) Translator {
+ return &translator{
+ Translator: trans,
+ translations: make(map[interface{}]*transText), // translation text broken up by byte index
+ cardinalTanslations: make(map[interface{}][]*transText),
+ ordinalTanslations: make(map[interface{}][]*transText),
+ rangeTanslations: make(map[interface{}][]*transText),
+ }
+}
+
+// Add adds a normal translation for a particular language/locale
+// {#} is the only replacement type accepted and may be used ad infinitum
+// eg. one: '{0} day left' other: '{0} days left'
+func (t *translator) Add(key interface{}, text string, override bool) error {
+
+ if _, ok := t.translations[key]; ok && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
+ }
+
+ lb := strings.Count(text, "{")
+ rb := strings.Count(text, "}")
+
+ if lb != rb {
+ return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
+ }
+
+ trans := &transText{
+ text: text,
+ }
+
+ var idx int
+
+ for i := 0; i < lb; i++ {
+ s := "{" + strconv.Itoa(i) + "}"
+ idx = strings.Index(text, s)
+ if idx == -1 {
+ return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
+ }
+
+ trans.indexes = append(trans.indexes, idx)
+ trans.indexes = append(trans.indexes, idx+len(s))
+ }
+
+ t.translations[key] = trans
+
+ return nil
+}
+
+// AddCardinal adds a cardinal plural translation for a particular language/locale
+// {0} is the only replacement type accepted and only one variable is accepted as
+// multiple cannot be used for a plural rule determination, unless it is a range;
+// see AddRange below.
+// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
+func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsCardinal() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.cardinalTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.cardinalTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 2, 2),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ return nil
+}
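+
+// A minimal usage sketch (not part of the upstream file), assuming 'trans' is a
+// Translator for a locale such as 'en', whose cardinal plural rules are One and Other:
+//
+//	_ = trans.AddCardinal("days", "{0} day", locales.PluralRuleOne, false)
+//	_ = trans.AddCardinal("days", "{0} days", locales.PluralRuleOther, false)
+//	msg, _ := trans.C("days", 2, 0, "2") // "2 days"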
+
+// AddOrdinal adds an ordinal plural translation for a particular language/locale
+// {0} is the only replacement type accepted and only one variable is accepted as
+// multiple cannot be used for a plural rule determination, unless it is a range;
+// see AddRange below.
+// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsOrdinal() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.ordinalTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.ordinalTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 2, 2),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ return nil
+}
+
+// AddRange adds a range plural translation for a particular language/locale
+// {0} and {1} are the only replacement types accepted, and both must be present.
+// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
+func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsRange() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.rangeTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.rangeTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 4, 4),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ idx = strings.Index(text, paramOne)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
+ }
+
+ trans.indexes[2] = idx
+ trans.indexes[3] = idx + len(paramOne)
+
+ return nil
+}
+
+// T creates the translation for the locale given the 'key' and params passed in
+func (t *translator) T(key interface{}, params ...string) (string, error) {
+
+ trans, ok := t.translations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ b := make([]byte, 0, 64)
+
+ var start, end, count int
+
+ for i := 0; i < len(trans.indexes); i++ {
+ end = trans.indexes[i]
+ b = append(b, trans.text[start:end]...)
+ b = append(b, params[count]...)
+ i++
+ start = trans.indexes[i]
+ count++
+ }
+
+ b = append(b, trans.text[start:]...)
+
+ return string(b), nil
+}
+
+// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
+func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
+
+ tarr, ok := t.cardinalTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.CardinalPluralRule(num, digits)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param...)
+ b = append(b, trans.text[trans.indexes[1]:]...)
+
+ return string(b), nil
+}
+
+// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
+func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
+
+ tarr, ok := t.ordinalTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.OrdinalPluralRule(num, digits)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param...)
+ b = append(b, trans.text[trans.indexes[1]:]...)
+
+ return string(b), nil
+}
+
+// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
+// and 'param1' and 'param2' passed in
+func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
+
+ tarr, ok := t.rangeTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.RangePluralRule(num1, digits1, num2, digits2)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param1...)
+ b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
+ b = append(b, param2...)
+ b = append(b, trans.text[trans.indexes[3]:]...)
+
+ return string(b), nil
+}
+
+// VerifyTranslations checks to ensure that no plural rules have been
+// missed within the translations.
+func (t *translator) VerifyTranslations() error {
+
+ for k, v := range t.cardinalTanslations {
+
+ for _, rule := range t.PluralsCardinal() {
+
+ if v[rule] == nil {
+ return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
+ }
+ }
+ }
+
+ for k, v := range t.ordinalTanslations {
+
+ for _, rule := range t.PluralsOrdinal() {
+
+ if v[rule] == nil {
+ return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
+ }
+ }
+ }
+
+ for k, v := range t.rangeTanslations {
+
+ for _, rule := range t.PluralsRange() {
+
+ if v[rule] == nil {
+ return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go
new file mode 100644
index 0000000..dbf707f
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go
@@ -0,0 +1,113 @@
+package ut
+
+import (
+ "strings"
+
+ "github.com/go-playground/locales"
+)
+
+// UniversalTranslator holds all locale & translation data
+type UniversalTranslator struct {
+ translators map[string]Translator
+ fallback Translator
+}
+
+// New returns a new UniversalTranslator instance set with
+// the fallback locale and locales it should support
+func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
+
+ t := &UniversalTranslator{
+ translators: make(map[string]Translator),
+ }
+
+ for _, v := range supportedLocales {
+
+ trans := newTranslator(v)
+ t.translators[strings.ToLower(trans.Locale())] = trans
+
+ if fallback.Locale() == v.Locale() {
+ t.fallback = trans
+ }
+ }
+
+ if t.fallback == nil && fallback != nil {
+ t.fallback = newTranslator(fallback)
+ }
+
+ return t
+}
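+
+// A minimal construction sketch (not part of the upstream file); locale packages
+// such as github.com/go-playground/locales/en provide locales.Translator values:
+//
+//	english := en.New()
+//	uni := ut.New(english, english) // fallback plus the supported locales
+//	trans, _ := uni.GetTranslator("en")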
+
+// FindTranslator tries to find a Translator based on an array of locales
+// and returns the first one it can find, otherwise returns the
+// fallback translator.
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
+
+ for _, locale := range locales {
+
+ if trans, found = t.translators[strings.ToLower(locale)]; found {
+ return
+ }
+ }
+
+ return t.fallback, false
+}
+
+// GetTranslator returns the specified translator for the given locale,
+// or fallback if not found
+func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
+
+ if trans, found = t.translators[strings.ToLower(locale)]; found {
+ return
+ }
+
+ return t.fallback, false
+}
+
+// GetFallback returns the fallback locale
+func (t *UniversalTranslator) GetFallback() Translator {
+ return t.fallback
+}
+
+// AddTranslator adds the supplied translator; if it already exists, the override param
+// is checked and, if false, an error is returned, otherwise the translator is
+// overridden; if the fallback matches the supplied translator it is overridden as well.
+// NOTE: this is normally only used when a translator is embedded within a library.
+func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
+
+ lc := strings.ToLower(translator.Locale())
+ _, ok := t.translators[lc]
+ if ok && !override {
+ return &ErrExistingTranslator{locale: translator.Locale()}
+ }
+
+ trans := newTranslator(translator)
+
+ if t.fallback.Locale() == translator.Locale() {
+
+ // because it's optional to have a fallback, I don't impose that limitation
+ // don't know why you wouldn't but...
+ if !override {
+ return &ErrExistingTranslator{locale: translator.Locale()}
+ }
+
+ t.fallback = trans
+ }
+
+ t.translators[lc] = trans
+
+ return nil
+}
+
+// VerifyTranslations runs through all locales and identifies any issues
+// eg. missing plural rules for a locale
+func (t *UniversalTranslator) VerifyTranslations() (err error) {
+
+ for _, trans := range t.translators {
+ err = trans.VerifyTranslations()
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore
new file mode 100644
index 0000000..6e43fac
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/.gitignore
@@ -0,0 +1,30 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+bin
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.test
+*.out
+*.txt
+cover.html
+README.html
diff --git a/vendor/github.com/go-playground/validator/v10/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE
new file mode 100644
index 0000000..6a2ae9a
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dean Karn
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
new file mode 100644
index 0000000..b809c4c
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
@@ -0,0 +1,16 @@
+## Maintainers Guide
+
+### Semantic Versioning
+Semantic versioning as defined [here](https://semver.org) must be strictly adhered to.
+
+### External Dependencies
+Any new external dependencies MUST:
+- Have a compatible LICENSE present.
+- Be actively maintained.
+- Be approved by @go-playground/admins
+
+### PR Merge Requirements
+- Up-to-date branch.
+- Passing tests and linting.
+- CODEOWNERS approval.
+- Tests that cover both the Happy and Unhappy paths.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile
new file mode 100644
index 0000000..164e8bb
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/Makefile
@@ -0,0 +1,18 @@
+GOCMD=GO111MODULE=on go
+
+linters-install:
+ @golangci-lint --version >/dev/null 2>&1 || { \
+ echo "installing linting tools..."; \
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.39.0; \
+ }
+
+lint: linters-install
+ golangci-lint run
+
+test:
+ $(GOCMD) test -cover -race ./...
+
+bench:
+ $(GOCMD) test -bench=. -benchmem ./...
+
+.PHONY: test lint linters-install
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
new file mode 100644
index 0000000..15d9fbb
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -0,0 +1,304 @@
+Package validator
+=================
+
+[](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+[](https://travis-ci.org/go-playground/validator)
+[](https://coveralls.io/github/go-playground/validator?branch=master)
+[](https://goreportcard.com/report/github.com/go-playground/validator)
+[](https://pkg.go.dev/github.com/go-playground/validator/v10)
+
+
+Package validator implements value validations for structs and individual fields based on tags.
+
+It has the following **unique** features:
+
+- Cross Field and Cross Struct validations by using validation tags or custom validators.
+- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
+- Ability to dive into both map keys and values for validation
+- Handles type interface by determining its underlying type prior to validation.
+- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
+- Alias validation tags, which allow mapping several validations to a single tag for easier definition of validations on structs
+- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
+- Customizable i18n aware error messages.
+- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
+
+Installation
+------------
+
+Use go get.
+
+    go get github.com/go-playground/validator/v10
+
+Then import the validator package into your own code.
+
+    import "github.com/go-playground/validator/v10"
+
+Error Return Value
+-------
+
+Validation functions return type error
+
+They return type error to avoid the issue discussed in the following, where err is always != nil:
+
+* http://stackoverflow.com/a/29138676/3158232
+* https://github.com/go-playground/validator/issues/134
+
+Validator returns only InvalidValidationError for bad validation input, nil, or ValidationErrors as type error; so, in your code, all you need to do is check whether the returned error is nil, and if it is not, check whether it is an InvalidValidationError (if necessary; most of the time it isn't) and then type-cast it to ValidationErrors like so:
+
+```go
+err := validate.Struct(mystruct)
+validationErrors := err.(validator.ValidationErrors)
+ ```
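+
+As a slightly fuller sketch of the pattern described above (assuming `validate` and `mystruct` are defined as in the simple example):
+
+```go
+err := validate.Struct(mystruct)
+if err != nil {
+	// A nil or non-struct value passed to Struct yields *InvalidValidationError.
+	if _, ok := err.(*validator.InvalidValidationError); ok {
+		fmt.Println(err)
+		return
+	}
+	// Everything else is validator.ValidationErrors, a slice of FieldError.
+	for _, fieldErr := range err.(validator.ValidationErrors) {
+		fmt.Println(fieldErr.Namespace(), fieldErr.Tag(), fieldErr.Param())
+	}
+}
+```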
+
+Usage and documentation
+------
+
+Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs.
+
+##### Examples:
+
+- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go)
+- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go)
+- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go)
+- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go)
+- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
+- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
+
+Baked-in Validations
+------
+
+### Fields:
+
+| Tag | Description |
+| - | - |
+| eqcsfield | Field Equals Another Field (relative)|
+| eqfield | Field Equals Another Field |
+| fieldcontains | NOT DOCUMENTED IN doc.go |
+| fieldexcludes | NOT DOCUMENTED IN doc.go |
+| gtcsfield | Field Greater Than Another Relative Field |
+| gtecsfield | Field Greater Than or Equal To Another Relative Field |
+| gtefield | Field Greater Than or Equal To Another Field |
+| gtfield | Field Greater Than Another Field |
+| ltcsfield | Less Than Another Relative Field |
+| ltecsfield | Less Than or Equal To Another Relative Field |
+| ltefield | Less Than or Equal To Another Field |
+| ltfield | Less Than Another Field |
+| necsfield | Field Does Not Equal Another Field (relative) |
+| nefield | Field Does Not Equal Another Field |
+
+### Network:
+
+| Tag | Description |
+| - | - |
+| cidr | Classless Inter-Domain Routing CIDR |
+| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
+| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
+| datauri | Data URL |
+| fqdn | Fully Qualified Domain Name (FQDN) |
+| hostname | Hostname RFC 952 |
+| hostname_port | HostPort |
+| hostname_rfc1123 | Hostname RFC 1123 |
+| ip | Internet Protocol Address IP |
+| ip4_addr | Internet Protocol Address IPv4 |
+| ip6_addr |Internet Protocol Address IPv6 |
+| ip_addr | Internet Protocol Address IP |
+| ipv4 | Internet Protocol Address IPv4 |
+| ipv6 | Internet Protocol Address IPv6 |
+| mac | Media Access Control Address MAC |
+| tcp4_addr | Transmission Control Protocol Address TCPv4 |
+| tcp6_addr | Transmission Control Protocol Address TCPv6 |
+| tcp_addr | Transmission Control Protocol Address TCP |
+| udp4_addr | User Datagram Protocol Address UDPv4 |
+| udp6_addr | User Datagram Protocol Address UDPv6 |
+| udp_addr | User Datagram Protocol Address UDP |
+| unix_addr | Unix domain socket end point Address |
+| uri | URI String |
+| url | URL String |
+| url_encoded | URL Encoded |
+| urn_rfc2141 | Urn RFC 2141 String |
+
+### Strings:
+
+| Tag | Description |
+| - | - |
+| alpha | Alpha Only |
+| alphanum | Alphanumeric |
+| alphanumunicode | Alphanumeric Unicode |
+| alphaunicode | Alpha Unicode |
+| ascii | ASCII |
+| contains | Contains |
+| containsany | Contains Any |
+| containsrune | Contains Rune |
+| endswith | Ends With |
+| lowercase | Lowercase |
+| multibyte | Multi-Byte Characters |
+| number | NOT DOCUMENTED IN doc.go |
+| numeric | Numeric |
+| printascii | Printable ASCII |
+| startswith | Starts With |
+| uppercase | Uppercase |
+
+### Format:
+| Tag | Description |
+| - | - |
+| base64 | Base64 String |
+| base64url | Base64URL String |
+| btc_addr | Bitcoin Address |
+| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
+| datetime | Datetime |
+| e164 | e164 formatted phone number |
+| email | E-mail String |
+| eth_addr | Ethereum Address |
+| hexadecimal | Hexadecimal String |
+| hexcolor | Hexcolor String |
+| hsl | HSL String |
+| hsla | HSLA String |
+| html | HTML Tags |
+| html_encoded | HTML Encoded |
+| isbn | International Standard Book Number |
+| isbn10 | International Standard Book Number 10 |
+| isbn13 | International Standard Book Number 13 |
+| json | JSON |
+| latitude | Latitude |
+| longitude | Longitude |
+| rgb | RGB String |
+| rgba | RGBA String |
+| ssn | Social Security Number SSN |
+| uuid | Universally Unique Identifier UUID |
+| uuid3 | Universally Unique Identifier UUID v3 |
+| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
+| uuid4 | Universally Unique Identifier UUID v4 |
+| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
+| uuid5 | Universally Unique Identifier UUID v5 |
+| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
+| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
+
+### Comparisons:
+| Tag | Description |
+| - | - |
+| eq | Equals |
+| gt | Greater than|
+| gte |Greater than or equal |
+| lt | Less Than |
+| lte | Less Than or Equal |
+| ne | Not Equal |
+
+### Other:
+| Tag | Description |
+| - | - |
+| dir | Directory |
+| endswith | Ends With |
+| excludes | Excludes |
+| excludesall | Excludes All |
+| excludesrune | Excludes Rune |
+| file | File path |
+| isdefault | Is Default |
+| len | Length |
+| max | Maximum |
+| min | Minimum |
+| oneof | One Of |
+| required | Required |
+| required_if | Required If |
+| required_unless | Required Unless |
+| required_with | Required With |
+| required_with_all | Required With All |
+| required_without | Required Without |
+| required_without_all | Required Without All |
+| excluded_with | Excluded With |
+| excluded_with_all | Excluded With All |
+| excluded_without | Excluded Without |
+| excluded_without_all | Excluded Without All |
+| unique | Unique |
+
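+As a brief illustrative sketch (not part of the upstream tables), these tags are combined, comma-separated, inside a struct field's `validate` tag:
+
+```go
+type User struct {
+	Name  string `validate:"required,min=2,max=64"`
+	Email string `validate:"required,email"`
+	Age   uint8  `validate:"gte=18,lte=130"`
+	Role  string `validate:"oneof=admin user guest"`
+}
+```
+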
+Benchmarks
+------
+###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
+```go
+goos: darwin
+goarch: amd64
+pkg: github.com/go-playground/validator
+BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
+BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
+BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
+BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
+BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
+BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
+BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
+BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
+BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
+BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
+BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
+BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
+BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
+BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
+BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
+BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
+BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
+BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
+BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
+BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
+BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
+BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
+BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
+BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
+BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
+BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
+BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
+BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
+BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
+BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
+BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
+BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
+BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
+BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
+BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
+BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
+```
+
+Complementary Software
+----------------------
+
+Here is a list of software that complements using this library either pre or post validation.
+
+* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support.
+* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects
+
+How to Contribute
+------
+
+Make a pull request...
+
+License
+-------
+Distributed under MIT License, please see license file within the code for more details.
+
+Maintainers
+-----------
+This project has grown large enough that more than one person is required to properly support the community.
+If you are interested in becoming a maintainer, please reach out to me at https://github.com/deankarn
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
new file mode 100644
index 0000000..c46e6cd
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -0,0 +1,2365 @@
+package validator
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/sha3"
+ "golang.org/x/text/language"
+
+ urn "github.com/leodido/go-urn"
+)
+
+// Func accepts a FieldLevel interface for all validation needs. The return
+// value should be true when validation succeeds.
+type Func func(fl FieldLevel) bool
+
+// FuncCtx accepts a context.Context and FieldLevel interface for all
+// validation needs. The return value should be true when validation succeeds.
+type FuncCtx func(ctx context.Context, fl FieldLevel) bool
+
+// wrapFunc wraps a normal Func to make it compatible with FuncCtx.
+func wrapFunc(fn Func) FuncCtx {
+ if fn == nil {
+ return nil // be sure not to wrap a bad function.
+ }
+ return func(ctx context.Context, fl FieldLevel) bool {
+ return fn(fl)
+ }
+}
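+
+// A hypothetical registration sketch (not part of the upstream file) showing how a
+// custom Func is typically wired in via (*Validate).RegisterValidation:
+//
+//	v := New()
+//	_ = v.RegisterValidation("is-awesome", func(fl FieldLevel) bool {
+//		return fl.Field().String() == "awesome"
+//	})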
+
+var (
+ restrictedTags = map[string]struct{}{
+ diveTag: {},
+ keysTag: {},
+ endKeysTag: {},
+ structOnlyTag: {},
+ omitempty: {},
+ skipValidationTag: {},
+ utf8HexComma: {},
+ utf8Pipe: {},
+ noStructLevelTag: {},
+ requiredTag: {},
+ isdefault: {},
+ }
+
+ // BakedInAliasValidators is a default mapping of a single validation tag that
+ // defines a common or complex set of validation(s) to simplify
+ // adding validation to structs.
+ bakedInAliases = map[string]string{
+ "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
+ "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
+ }
+
+ // BakedInValidators is the default map of ValidationFunc
+	// you can add, remove or even replace items to suit your needs,
+ // or even disregard and use your own map if so desired.
+ bakedInValidators = map[string]Func{
+ "required": hasValue,
+ "required_if": requiredIf,
+ "required_unless": requiredUnless,
+ "required_with": requiredWith,
+ "required_with_all": requiredWithAll,
+ "required_without": requiredWithout,
+ "required_without_all": requiredWithoutAll,
+ "excluded_with": excludedWith,
+ "excluded_with_all": excludedWithAll,
+ "excluded_without": excludedWithout,
+ "excluded_without_all": excludedWithoutAll,
+ "isdefault": isDefault,
+ "len": hasLengthOf,
+ "min": hasMinOf,
+ "max": hasMaxOf,
+ "eq": isEq,
+ "ne": isNe,
+ "lt": isLt,
+ "lte": isLte,
+ "gt": isGt,
+ "gte": isGte,
+ "eqfield": isEqField,
+ "eqcsfield": isEqCrossStructField,
+ "necsfield": isNeCrossStructField,
+ "gtcsfield": isGtCrossStructField,
+ "gtecsfield": isGteCrossStructField,
+ "ltcsfield": isLtCrossStructField,
+ "ltecsfield": isLteCrossStructField,
+ "nefield": isNeField,
+ "gtefield": isGteField,
+ "gtfield": isGtField,
+ "ltefield": isLteField,
+ "ltfield": isLtField,
+ "fieldcontains": fieldContains,
+ "fieldexcludes": fieldExcludes,
+ "alpha": isAlpha,
+ "alphanum": isAlphanum,
+ "alphaunicode": isAlphaUnicode,
+ "alphanumunicode": isAlphanumUnicode,
+ "numeric": isNumeric,
+ "number": isNumber,
+ "hexadecimal": isHexadecimal,
+ "hexcolor": isHEXColor,
+ "rgb": isRGB,
+ "rgba": isRGBA,
+ "hsl": isHSL,
+ "hsla": isHSLA,
+ "e164": isE164,
+ "email": isEmail,
+ "url": isURL,
+ "uri": isURI,
+ "urn_rfc2141": isUrnRFC2141, // RFC 2141
+ "file": isFile,
+ "base64": isBase64,
+ "base64url": isBase64URL,
+ "contains": contains,
+ "containsany": containsAny,
+ "containsrune": containsRune,
+ "excludes": excludes,
+ "excludesall": excludesAll,
+ "excludesrune": excludesRune,
+ "startswith": startsWith,
+ "endswith": endsWith,
+ "startsnotwith": startsNotWith,
+ "endsnotwith": endsNotWith,
+ "isbn": isISBN,
+ "isbn10": isISBN10,
+ "isbn13": isISBN13,
+ "eth_addr": isEthereumAddress,
+ "btc_addr": isBitcoinAddress,
+ "btc_addr_bech32": isBitcoinBech32Address,
+ "uuid": isUUID,
+ "uuid3": isUUID3,
+ "uuid4": isUUID4,
+ "uuid5": isUUID5,
+ "uuid_rfc4122": isUUIDRFC4122,
+ "uuid3_rfc4122": isUUID3RFC4122,
+ "uuid4_rfc4122": isUUID4RFC4122,
+ "uuid5_rfc4122": isUUID5RFC4122,
+ "ascii": isASCII,
+ "printascii": isPrintableASCII,
+ "multibyte": hasMultiByteCharacter,
+ "datauri": isDataURI,
+ "latitude": isLatitude,
+ "longitude": isLongitude,
+ "ssn": isSSN,
+ "ipv4": isIPv4,
+ "ipv6": isIPv6,
+ "ip": isIP,
+ "cidrv4": isCIDRv4,
+ "cidrv6": isCIDRv6,
+ "cidr": isCIDR,
+ "tcp4_addr": isTCP4AddrResolvable,
+ "tcp6_addr": isTCP6AddrResolvable,
+ "tcp_addr": isTCPAddrResolvable,
+ "udp4_addr": isUDP4AddrResolvable,
+ "udp6_addr": isUDP6AddrResolvable,
+ "udp_addr": isUDPAddrResolvable,
+ "ip4_addr": isIP4AddrResolvable,
+ "ip6_addr": isIP6AddrResolvable,
+ "ip_addr": isIPAddrResolvable,
+ "unix_addr": isUnixAddrResolvable,
+ "mac": isMAC,
+ "hostname": isHostnameRFC952, // RFC 952
+ "hostname_rfc1123": isHostnameRFC1123, // RFC 1123
+ "fqdn": isFQDN,
+ "unique": isUnique,
+ "oneof": isOneOf,
+ "html": isHTML,
+ "html_encoded": isHTMLEncoded,
+ "url_encoded": isURLEncoded,
+ "dir": isDir,
+ "json": isJSON,
+ "hostname_port": isHostnamePort,
+ "lowercase": isLowercase,
+ "uppercase": isUppercase,
+ "datetime": isDatetime,
+ "timezone": isTimeZone,
+ "iso3166_1_alpha2": isIso3166Alpha2,
+ "iso3166_1_alpha3": isIso3166Alpha3,
+ "iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
+ "bcp47_language_tag": isBCP47LanguageTag,
+ "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2,
+ "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field,
+ "bic": isIsoBicFormat,
+ }
+)
+
+var oneofValsCache = map[string][]string{}
+var oneofValsCacheRWLock = sync.RWMutex{}
+
+func parseOneOfParam2(s string) []string {
+ oneofValsCacheRWLock.RLock()
+ vals, ok := oneofValsCache[s]
+ oneofValsCacheRWLock.RUnlock()
+ if !ok {
+ oneofValsCacheRWLock.Lock()
+ vals = splitParamsRegex.FindAllString(s, -1)
+ for i := 0; i < len(vals); i++ {
+ vals[i] = strings.Replace(vals[i], "'", "", -1)
+ }
+ oneofValsCache[s] = vals
+ oneofValsCacheRWLock.Unlock()
+ }
+ return vals
+}
+
+func isURLEncoded(fl FieldLevel) bool {
+ return uRLEncodedRegex.MatchString(fl.Field().String())
+}
+
+func isHTMLEncoded(fl FieldLevel) bool {
+ return hTMLEncodedRegex.MatchString(fl.Field().String())
+}
+
+func isHTML(fl FieldLevel) bool {
+ return hTMLRegex.MatchString(fl.Field().String())
+}
+
+func isOneOf(fl FieldLevel) bool {
+ vals := parseOneOfParam2(fl.Param())
+
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+ for i := 0; i < len(vals); i++ {
+ if vals[i] == v {
+ return true
+ }
+ }
+ return false
+}
+
+// isUnique is the validation function for validating if each array|slice|map value is unique
+func isUnique(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+ v := reflect.ValueOf(struct{}{})
+
+ switch field.Kind() {
+ case reflect.Slice, reflect.Array:
+ elem := field.Type().Elem()
+ if elem.Kind() == reflect.Ptr {
+ elem = elem.Elem()
+ }
+
+ if param == "" {
+ m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
+
+ for i := 0; i < field.Len(); i++ {
+ m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
+ }
+ return field.Len() == m.Len()
+ }
+
+ sf, ok := elem.FieldByName(param)
+ if !ok {
+ panic(fmt.Sprintf("Bad field name %s", param))
+ }
+
+ sfTyp := sf.Type
+ if sfTyp.Kind() == reflect.Ptr {
+ sfTyp = sfTyp.Elem()
+ }
+
+ m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
+ for i := 0; i < field.Len(); i++ {
+ m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v)
+ }
+ return field.Len() == m.Len()
+ case reflect.Map:
+ m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
+
+ for _, k := range field.MapKeys() {
+ m.SetMapIndex(field.MapIndex(k), v)
+ }
+ return field.Len() == m.Len()
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+}
+
+// IsMAC is the validation function for validating if the field's value is a valid MAC address.
+func isMAC(fl FieldLevel) bool {
+
+ _, err := net.ParseMAC(fl.Field().String())
+
+ return err == nil
+}
+
+// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
+func isCIDRv4(fl FieldLevel) bool {
+
+ ip, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil && ip.To4() != nil
+}
+
+// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
+func isCIDRv6(fl FieldLevel) bool {
+
+ ip, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil && ip.To4() == nil
+}
+
+// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
+func isCIDR(fl FieldLevel) bool {
+
+ _, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil
+}
+
+// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
+func isIPv4(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil && ip.To4() != nil
+}
+
+// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
+func isIPv6(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil && ip.To4() == nil
+}
+
+// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
+func isIP(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil
+}
+
+// IsSSN is the validation function for validating if the field's value is a valid SSN.
+func isSSN(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ if field.Len() != 11 {
+ return false
+ }
+
+ return sSNRegex.MatchString(field.String())
+}
+
+// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
+func isLongitude(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ case reflect.Float32:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
+ case reflect.Float64:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ return longitudeRegex.MatchString(v)
+}
+
+// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
+func isLatitude(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ case reflect.Float32:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
+ case reflect.Float64:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ return latitudeRegex.MatchString(v)
+}
+
+// IsDataURI is the validation function for validating if the field's value is a valid data URI.
+func isDataURI(fl FieldLevel) bool {
+
+ uri := strings.SplitN(fl.Field().String(), ",", 2)
+
+ if len(uri) != 2 {
+ return false
+ }
+
+ if !dataURIRegex.MatchString(uri[0]) {
+ return false
+ }
+
+ return base64Regex.MatchString(uri[1])
+}
+
+// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
+func hasMultiByteCharacter(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ if field.Len() == 0 {
+ return true
+ }
+
+ return multibyteRegex.MatchString(field.String())
+}
+
+// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
+func isPrintableASCII(fl FieldLevel) bool {
+ return printableASCIIRegex.MatchString(fl.Field().String())
+}
+
+// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
+func isASCII(fl FieldLevel) bool {
+ return aSCIIRegex.MatchString(fl.Field().String())
+}
+
+// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
+func isUUID5(fl FieldLevel) bool {
+ return uUID5Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
+func isUUID4(fl FieldLevel) bool {
+ return uUID4Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
+func isUUID3(fl FieldLevel) bool {
+ return uUID3Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
+func isUUID(fl FieldLevel) bool {
+ return uUIDRegex.MatchString(fl.Field().String())
+}
+
+// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
+func isUUID5RFC4122(fl FieldLevel) bool {
+ return uUID5RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
+func isUUID4RFC4122(fl FieldLevel) bool {
+ return uUID4RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
+func isUUID3RFC4122(fl FieldLevel) bool {
+ return uUID3RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
+func isUUIDRFC4122(fl FieldLevel) bool {
+ return uUIDRFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
+func isISBN(fl FieldLevel) bool {
+ return isISBN10(fl) || isISBN13(fl)
+}
+
+// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
+func isISBN13(fl FieldLevel) bool {
+
+ s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)
+
+ if !iSBN13Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
+ factor := []int32{1, 3}
+
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(s[i]-'0')
+ }
+
+ return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
+}
+
+// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
+func isISBN10(fl FieldLevel) bool {
+
+ s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)
+
+ if !iSBN10Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(s[i]-'0')
+ }
+
+ if s[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(s[9]-'0')
+ }
+
+ return checksum%11 == 0
+}
+
+// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
+func isEthereumAddress(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !ethAddressRegex.MatchString(address) {
+ return false
+ }
+
+ if ethAddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
+ return true
+ }
+
+ // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
+ address = address[2:] // Skip "0x" prefix.
+ h := sha3.NewLegacyKeccak256()
+ // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
+ _, _ = h.Write([]byte(strings.ToLower(address)))
+ hash := hex.EncodeToString(h.Sum(nil))
+
+ for i := 0; i < len(address); i++ {
+ if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
+ continue
+ }
+ if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
+ return false
+ }
+ }
+
+ return true
+}
+
+// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address
+func isBitcoinAddress(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !btcAddressRegex.MatchString(address) {
+ return false
+ }
+
+ alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
+
+ decode := [25]byte{}
+
+ for _, n := range []byte(address) {
+ d := bytes.IndexByte(alphabet, n)
+
+ for i := 24; i >= 0; i-- {
+ d += 58 * int(decode[i])
+ decode[i] = byte(d % 256)
+ d /= 256
+ }
+ }
+
+ h := sha256.New()
+ _, _ = h.Write(decode[:21])
+ d := h.Sum([]byte{})
+ h = sha256.New()
+ _, _ = h.Write(d)
+
+ validchecksum := [4]byte{}
+ computedchecksum := [4]byte{}
+
+ copy(computedchecksum[:], h.Sum(d[:0]))
+ copy(validchecksum[:], decode[21:])
+
+ return validchecksum == computedchecksum
+}
+
+// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
+func isBitcoinBech32Address(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
+ return false
+ }
+
+ am := len(address) % 8
+
+ if am == 0 || am == 3 || am == 5 {
+ return false
+ }
+
+ address = strings.ToLower(address)
+
+ alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
+
+ hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc
+ addr := address[3:]
+ dp := make([]int, 0, len(addr))
+
+ for _, c := range addr {
+ dp = append(dp, strings.IndexRune(alphabet, c))
+ }
+
+ ver := dp[0]
+
+ if ver < 0 || ver > 16 {
+ return false
+ }
+
+ if ver == 0 {
+ if len(address) != 42 && len(address) != 62 {
+ return false
+ }
+ }
+
+ values := append(hr, dp...)
+
+ GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
+
+ p := 1
+
+ for _, v := range values {
+ b := p >> 25
+ p = (p&0x1ffffff)<<5 ^ v
+
+ for i := 0; i < 5; i++ {
+ if (b>>uint(i))&1 == 1 {
+ p ^= GEN[i]
+ }
+ }
+ }
+
+ if p != 1 {
+ return false
+ }
+
+ b := uint(0)
+ acc := 0
+ mv := (1 << 5) - 1
+ var sw []int
+
+ for _, v := range dp[1 : len(dp)-6] {
+ acc = (acc << 5) | v
+ b += 5
+ for b >= 8 {
+ b -= 8
+ sw = append(sw, (acc>>b)&mv)
+ }
+ }
+
+ if len(sw) < 2 || len(sw) > 40 {
+ return false
+ }
+
+ return true
+}
+
+// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
+func excludesRune(fl FieldLevel) bool {
+ return !containsRune(fl)
+}
+
+// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
+func excludesAll(fl FieldLevel) bool {
+ return !containsAny(fl)
+}
+
+// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
+func excludes(fl FieldLevel) bool {
+ return !contains(fl)
+}
+
+// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
+func containsRune(fl FieldLevel) bool {
+
+ r, _ := utf8.DecodeRuneInString(fl.Param())
+
+ return strings.ContainsRune(fl.Field().String(), r)
+}
+
+// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
+func containsAny(fl FieldLevel) bool {
+ return strings.ContainsAny(fl.Field().String(), fl.Param())
+}
+
+// Contains is the validation function for validating that the field's value contains the text specified within the param.
+func contains(fl FieldLevel) bool {
+ return strings.Contains(fl.Field().String(), fl.Param())
+}
+
+// StartsWith is the validation function for validating that the field's value starts with the text specified within the param.
+func startsWith(fl FieldLevel) bool {
+ return strings.HasPrefix(fl.Field().String(), fl.Param())
+}
+
+// EndsWith is the validation function for validating that the field's value ends with the text specified within the param.
+func endsWith(fl FieldLevel) bool {
+ return strings.HasSuffix(fl.Field().String(), fl.Param())
+}
+
+// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param.
+func startsNotWith(fl FieldLevel) bool {
+ return !startsWith(fl)
+}
+
+// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param.
+func endsNotWith(fl FieldLevel) bool {
+ return !endsWith(fl)
+}
+
+// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
+func fieldContains(fl FieldLevel) bool {
+ field := fl.Field()
+
+ currentField, _, ok := fl.GetStructFieldOK()
+
+ if !ok {
+ return false
+ }
+
+ return strings.Contains(field.String(), currentField.String())
+}
+
+// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value.
+func fieldExcludes(fl FieldLevel) bool {
+ field := fl.Field()
+
+ currentField, _, ok := fl.GetStructFieldOK()
+ if !ok {
+ return true
+ }
+
+ return !strings.Contains(field.String(), currentField.String())
+}
+
+// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
+func isNeField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+
+ if !ok || currentKind != kind {
+ return true
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() != currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() != currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() != currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) != int64(currentField.Len())
+
+ case reflect.Bool:
+ return field.Bool() != currentField.Bool()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() != currentField.String()
+}
+
+// IsNe is the validation function for validating that the field's value does not equal the provided param value.
+func isNe(fl FieldLevel) bool {
+ return !isEq(fl)
+}
+
+// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
+func isLteCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() <= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() <= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() <= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) <= int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() <= topField.String()
+}
+
+// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
+func isLtCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() < topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() < topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() < topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) < int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() < topField.String()
+}
+
+// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
+func isGteCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() >= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() >= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() >= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) >= int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() >= topField.String()
+}
+
+// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
+func isGtCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() > topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() > topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() > topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) > int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() > topField.String()
+}
+
+// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
+func isNeCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return true
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() != field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() != field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() != field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) != int64(field.Len())
+
+ case reflect.Bool:
+ return topField.Bool() != field.Bool()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() != field.String()
+}
+
+// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
+func isEqCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() == field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() == field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() == field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) == int64(field.Len())
+
+ case reflect.Bool:
+ return topField.Bool() == field.Bool()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() == field.String()
+}
+
+// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
+func isEqField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() == currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == int64(currentField.Len())
+
+ case reflect.Bool:
+ return field.Bool() == currentField.Bool()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() == currentField.String()
+}
+
+// IsEq is the validation function for validating if the current field's value is equal to the param's value.
+func isEq(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ return field.String() == param
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+
+ case reflect.Bool:
+ p := asBool(param)
+
+ return field.Bool() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isPostcodeByIso3166Alpha2 validates a postcode against the param value, which is a country code in ISO 3166-1 alpha-2 format
+// example: `postcode_iso3166_alpha2=US`
+func isPostcodeByIso3166Alpha2(fl FieldLevel) bool {
+ field := fl.Field()
+ param := fl.Param()
+
+ reg, found := postCodeRegexDict[param]
+ if !found {
+ return false
+ }
+
+ return reg.MatchString(field.String())
+}
+
+// isPostcodeByIso3166Alpha2Field validates a postcode against another field whose value is a country code in ISO 3166-1 alpha-2 format
+// example: `postcode_iso3166_alpha2_field=CountryCode`
+func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool {
+ field := fl.Field()
+ params := parseOneOfParam2(fl.Param())
+
+ if len(params) != 1 {
+ return false
+ }
+
+ currentField, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), params[0])
+ if !found {
+ return false
+ }
+
+ if kind != reflect.String {
+ panic(fmt.Sprintf("Bad field type %T", currentField.Interface()))
+ }
+
+ reg, found := postCodeRegexDict[currentField.String()]
+ if !found {
+ return false
+ }
+
+ return reg.MatchString(field.String())
+}
+
+// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
+func isBase64(fl FieldLevel) bool {
+ return base64Regex.MatchString(fl.Field().String())
+}
+
+// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
+func isBase64URL(fl FieldLevel) bool {
+ return base64URLRegex.MatchString(fl.Field().String())
+}
+
+// IsURI is the validation function for validating if the current field's value is a valid URI.
+func isURI(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i := strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if len(s) == 0 {
+ return false
+ }
+
+ _, err := url.ParseRequestURI(s)
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsURL is the validation function for validating if the current field's value is a valid URL.
+func isURL(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ var i int
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i = strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if len(s) == 0 {
+ return false
+ }
+
+ url, err := url.ParseRequestURI(s)
+
+ if err != nil || url.Scheme == "" {
+ return false
+ }
+
+ return true
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
+func isUrnRFC2141(fl FieldLevel) bool {
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ str := field.String()
+
+ _, match := urn.Parse([]byte(str))
+
+ return match
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsFile is the validation function for validating if the current field's value is a valid file path.
+func isFile(fl FieldLevel) bool {
+ field := fl.Field()
+
+ switch field.Kind() {
+ case reflect.String:
+ fileInfo, err := os.Stat(field.String())
+ if err != nil {
+ return false
+ }
+
+ return !fileInfo.IsDir()
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
+func isE164(fl FieldLevel) bool {
+ return e164Regex.MatchString(fl.Field().String())
+}
+
+// IsEmail is the validation function for validating if the current field's value is a valid email address.
+func isEmail(fl FieldLevel) bool {
+ return emailRegex.MatchString(fl.Field().String())
+}
+
+// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
+func isHSLA(fl FieldLevel) bool {
+ return hslaRegex.MatchString(fl.Field().String())
+}
+
+// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
+func isHSL(fl FieldLevel) bool {
+ return hslRegex.MatchString(fl.Field().String())
+}
+
+// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
+func isRGBA(fl FieldLevel) bool {
+ return rgbaRegex.MatchString(fl.Field().String())
+}
+
+// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
+func isRGB(fl FieldLevel) bool {
+ return rgbRegex.MatchString(fl.Field().String())
+}
+
+// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
+func isHEXColor(fl FieldLevel) bool {
+ return hexColorRegex.MatchString(fl.Field().String())
+}
+
+// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
+func isHexadecimal(fl FieldLevel) bool {
+ return hexadecimalRegex.MatchString(fl.Field().String())
+}
+
+// IsNumber is the validation function for validating if the current field's value is a valid number.
+func isNumber(fl FieldLevel) bool {
+ switch fl.Field().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return true
+ default:
+ return numberRegex.MatchString(fl.Field().String())
+ }
+}
+
+// IsNumeric is the validation function for validating if the current field's value is a valid numeric value.
+func isNumeric(fl FieldLevel) bool {
+ switch fl.Field().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return true
+ default:
+ return numericRegex.MatchString(fl.Field().String())
+ }
+}
+
+// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
+func isAlphanum(fl FieldLevel) bool {
+ return alphaNumericRegex.MatchString(fl.Field().String())
+}
+
+// IsAlpha is the validation function for validating if the current field's value is a valid alpha value.
+func isAlpha(fl FieldLevel) bool {
+ return alphaRegex.MatchString(fl.Field().String())
+}
+
+// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
+func isAlphanumUnicode(fl FieldLevel) bool {
+ return alphaUnicodeNumericRegex.MatchString(fl.Field().String())
+}
+
+// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
+func isAlphaUnicode(fl FieldLevel) bool {
+ return alphaUnicodeRegex.MatchString(fl.Field().String())
+}
+
+// isDefault is the opposite of required aka hasValue
+func isDefault(fl FieldLevel) bool {
+ return !hasValue(fl)
+}
+
+// HasValue is the validation function for validating if the current field's value is not the default static value.
+func hasValue(fl FieldLevel) bool {
+ field := fl.Field()
+ switch field.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return !field.IsNil()
+ default:
+ if fl.(*validate).fldIsPointer && field.Interface() != nil {
+ return true
+ }
+ return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+ }
+}
+
+// requireCheckFieldKind checks whether the field named by param (or the current field when param is empty) is nil or holds its type's zero value
+func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
+ field := fl.Field()
+ kind := field.Kind()
+ var nullable, found bool
+ if len(param) > 0 {
+ field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
+ if !found {
+ return defaultNotFoundValue
+ }
+ }
+ switch kind {
+ case reflect.Invalid:
+ return defaultNotFoundValue
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return field.IsNil()
+ default:
+ if nullable && field.Interface() != nil {
+ return false
+ }
+ return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface()
+ }
+}
+
+// requireCheckFieldValue checks whether the field named by param equals the given value
+func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool {
+ field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
+ if !found {
+ return defaultNotFoundValue
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == asInt(value)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == asUint(value)
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() == asFloat(value)
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == asInt(value)
+
+ case reflect.Bool:
+ return field.Bool() == asBool(value)
+ }
+
+ // default reflect.String:
+ return field.String() == value
+}
+
+// requiredIf is the validation function
+// The field under validation must be present and not empty only if all of the other specified fields are equal to the value following the specified field.
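+// For example (an illustrative sketch; the struct and field names below are
+// hypothetical, not part of this package):
+//
+//	type Contact struct {
+//		Method string
+//		Email  string `validate:"required_if=Method email"`
+//	}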
+func requiredIf(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// requiredUnless is the validation function
+// The field under validation must be present and not empty unless all of the other specified fields are equal to the value following the specified field.
+func requiredUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
+ }
+
+ for i := 0; i < len(params); i += 2 {
+ if requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// ExcludedWith is the validation function
+// The field under validation must not be present, or must be empty, if any of the other specified fields are present.
+func excludedWith(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return !hasValue(fl)
+ }
+ }
+ return true
+}
+
+// RequiredWith is the validation function
+// The field under validation must be present and not empty only if any of the other specified fields are present.
+func requiredWith(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return hasValue(fl)
+ }
+ }
+ return true
+}
+
+// ExcludedWithAll is the validation function
+// The field under validation must not be present, or must be empty, if all of the other specified fields are present.
+func excludedWithAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// RequiredWithAll is the validation function
+// The field under validation must be present and not empty only if all of the other specified fields are present.
+func requiredWithAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// ExcludedWithout is the validation function
+// The field under validation must not be present, or must be empty, when any of the other specified fields are not present.
+func excludedWithout(fl FieldLevel) bool {
+ if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
+ return !hasValue(fl)
+ }
+ return true
+}
+
+// RequiredWithout is the validation function
+// The field under validation must be present and not empty only when any of the other specified fields are not present.
+func requiredWithout(fl FieldLevel) bool {
+ if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
+ return hasValue(fl)
+ }
+ return true
+}
+
+// ExcludedWithoutAll is the validation function
+// The field under validation must not be present, or must be empty, when all of the other specified fields are not present.
+func excludedWithoutAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// RequiredWithoutAll is the validation function
+// The field under validation must be present and not empty only when all of the other specified fields are not present.
+func requiredWithoutAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
+func isGteField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() >= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() >= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() >= currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) >= len(currentField.String())
+}
+
+// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
+func isGtField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() > currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() > currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() > currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) > len(currentField.String())
+}
+
+// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
+func isGte(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) >= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) >= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() >= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() >= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() >= p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.After(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsGt is the validation function for validating if the current field's value is greater than the param's value.
+func isGt(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) > p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) > p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() > p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() > p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() > p
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ return field.Interface().(time.Time).After(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
+func hasLengthOf(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) == p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
+func hasMinOf(fl FieldLevel) bool {
+ return isGte(fl)
+}
+
+// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
+func isLteField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() <= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() <= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() <= currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) <= len(currentField.String())
+}
+
+// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
+func isLtField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() < currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() < currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() < currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+		// Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) < len(currentField.String())
+}
+
+// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
+func isLte(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) <= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) <= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() <= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() <= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() <= p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.Before(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsLt is the validation function for validating if the current field's value is less than the param's value.
+func isLt(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) < p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) < p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() < p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() < p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() < p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ return field.Interface().(time.Time).Before(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
+func hasMaxOf(fl FieldLevel) bool {
+ return isLte(fl)
+}
+
+// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
+func isTCP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp4", fl.Field().String())
+ return err == nil
+}
+
+// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
+func isTCP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
+func isTCPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) && !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
+func isUDP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp4", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
+func isUDP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
+func isUDPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) && !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
+func isIP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIPv4(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip4", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
+func isIP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIPv6(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
+func isIPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
+func isUnixAddrResolvable(fl FieldLevel) bool {
+
+ _, err := net.ResolveUnixAddr("unix", fl.Field().String())
+
+ return err == nil
+}
+
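+// isIP4Addr reports whether the field's value is an IPv4 address, optionally
+// followed by a :port suffix, which is stripped before parsing.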
+func isIP4Addr(fl FieldLevel) bool {
+
+ val := fl.Field().String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ val = val[0:idx]
+ }
+
+ ip := net.ParseIP(val)
+
+ return ip != nil && ip.To4() != nil
+}
+
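+// isIP6Addr reports whether the field's value is an IPv6 address; a bracketed
+// form with a :port suffix (e.g. "[::1]:80") has the brackets and port stripped
+// before parsing.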
+func isIP6Addr(fl FieldLevel) bool {
+
+ val := fl.Field().String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ if idx != 0 && val[idx-1:idx] == "]" {
+ val = val[1 : idx-1]
+ }
+ }
+
+ ip := net.ParseIP(val)
+
+ return ip != nil && ip.To4() == nil
+}
+
+func isHostnameRFC952(fl FieldLevel) bool {
+ return hostnameRegexRFC952.MatchString(fl.Field().String())
+}
+
+func isHostnameRFC1123(fl FieldLevel) bool {
+ return hostnameRegexRFC1123.MatchString(fl.Field().String())
+}
+
+func isFQDN(fl FieldLevel) bool {
+ val := fl.Field().String()
+
+ if val == "" {
+ return false
+ }
+
+ return fqdnRegexRFC1123.MatchString(val)
+}
+
+// IsDir is the validation function for validating if the current field's value is a valid directory.
+func isDir(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ fileInfo, err := os.Stat(field.String())
+ if err != nil {
+ return false
+ }
+
+ return fileInfo.IsDir()
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isJSON is the validation function for validating if the current field's value is a valid json string.
+func isJSON(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ val := field.String()
+ return json.Valid([]byte(val))
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isHostnamePort validates a host:port combination (e.g. "localhost:8080" or ":8080") for fields typically used for a socket address.
+func isHostnamePort(fl FieldLevel) bool {
+ val := fl.Field().String()
+ host, port, err := net.SplitHostPort(val)
+ if err != nil {
+ return false
+ }
+	// Port must be an integer in the range 1-65535.
+ if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 {
+ return false
+ }
+
+ // If host is specified, it should match a DNS name
+ if host != "" {
+ return hostnameRegexRFC1123.MatchString(host)
+ }
+ return true
+}
+
+// isLowercase is the validation function for validating if the current field's value is a lowercase string.
+func isLowercase(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ if field.String() == "" {
+ return false
+ }
+ return field.String() == strings.ToLower(field.String())
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isUppercase is the validation function for validating if the current field's value is an uppercase string.
+func isUppercase(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ if field.String() == "" {
+ return false
+ }
+ return field.String() == strings.ToUpper(field.String())
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isDatetime is the validation function for validating if the current field's value is a valid datetime string.
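+// The param is interpreted as a Go reference-time layout, so a tag such as
+// `datetime=2006-01-02` (illustrative) accepts values like "2021-03-01".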
+func isDatetime(fl FieldLevel) bool {
+ field := fl.Field()
+ param := fl.Param()
+
+ if field.Kind() == reflect.String {
+ _, err := time.Parse(param, field.String())
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
+func isTimeZone(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+		// an empty value is converted to UTC by time.LoadLocation, but disallow it since it is not a valid time zone name
+ if field.String() == "" {
+ return false
+ }
+
+		// the value "Local" is converted to the current system time zone by time.LoadLocation, but disallow it since it is not a valid time zone name
+ if strings.ToLower(field.String()) == "local" {
+ return false
+ }
+
+ _, err := time.LoadLocation(field.String())
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
+func isIso3166Alpha2(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return iso3166_1_alpha2[val]
+}
+
+// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
+func isIso3166Alpha3(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return iso3166_1_alpha3[val]
+}
+
+// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
+func isIso3166AlphaNumeric(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var code int
+ switch field.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ code = int(field.Int() % 1000)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ code = int(field.Uint() % 1000)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+ return iso3166_1_alpha_numeric[code]
+}
+
+// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse
+func isBCP47LanguageTag(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ _, err := language.Parse(field.String())
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362
+func isIsoBicFormat(fl FieldLevel) bool {
+ bicString := fl.Field().String()
+
+ return bicRegex.MatchString(bicString)
+}
diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go
new file mode 100644
index 0000000..0d18d6e
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/cache.go
@@ -0,0 +1,322 @@
+package validator
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type tagType uint8
+
+const (
+ typeDefault tagType = iota
+ typeOmitEmpty
+ typeIsDefault
+ typeNoStructLevel
+ typeStructOnly
+ typeDive
+ typeOr
+ typeKeys
+ typeEndKeys
+)
+
+const (
+ invalidValidation = "Invalid validation tag on field '%s'"
+ undefinedValidation = "Undefined validation function '%s' on field '%s'"
+ keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
+)
+
+type structCache struct {
+ lock sync.Mutex
+ m atomic.Value // map[reflect.Type]*cStruct
+}
+
+func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
+ c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
+ return
+}
+
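+// Set stores the parsed struct under key. It copies the current map and
+// atomically swaps in the new one (copy-on-write), so Get never needs a lock.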
+func (sc *structCache) Set(key reflect.Type, value *cStruct) {
+ m := sc.m.Load().(map[reflect.Type]*cStruct)
+ nm := make(map[reflect.Type]*cStruct, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ sc.m.Store(nm)
+}
+
+type tagCache struct {
+ lock sync.Mutex
+ m atomic.Value // map[string]*cTag
+}
+
+func (tc *tagCache) Get(key string) (c *cTag, found bool) {
+ c, found = tc.m.Load().(map[string]*cTag)[key]
+ return
+}
+
+func (tc *tagCache) Set(key string, value *cTag) {
+ m := tc.m.Load().(map[string]*cTag)
+ nm := make(map[string]*cTag, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ tc.m.Store(nm)
+}
+
+type cStruct struct {
+ name string
+ fields []*cField
+ fn StructLevelFuncCtx
+}
+
+type cField struct {
+ idx int
+ name string
+ altName string
+ namesEqual bool
+ cTags *cTag
+}
+
+type cTag struct {
+ tag string
+ aliasTag string
+ actualAliasTag string
+ param string
+ keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
+ next *cTag
+ fn FuncCtx
+ typeof tagType
+ hasTag bool
+ hasAlias bool
+ hasParam bool // true if parameter used eg. eq= where the equal sign has been set
+ isBlockEnd bool // indicates the current tag represents the last validation in the block
+ runValidationWhenNil bool
+}
+
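+// extractStructCache parses the validation tags of every exported field of the
+// given struct value and caches the result per struct type, so each type is
+// only parsed once.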
+func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
+ v.structCache.lock.Lock()
+ defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
+
+ typ := current.Type()
+
+	// multiple goroutines may have been waiting to parse this struct, but once the first
+	// one finishes, this check ensures the struct isn't parsed again.
+ cs, ok := v.structCache.Get(typ)
+ if ok {
+ return cs
+ }
+
+ cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
+
+ numFields := current.NumField()
+
+ var ctag *cTag
+ var fld reflect.StructField
+ var tag string
+ var customName string
+
+ for i := 0; i < numFields; i++ {
+
+ fld = typ.Field(i)
+
+ if !fld.Anonymous && len(fld.PkgPath) > 0 {
+ continue
+ }
+
+ tag = fld.Tag.Get(v.tagName)
+
+ if tag == skipValidationTag {
+ continue
+ }
+
+ customName = fld.Name
+
+ if v.hasTagNameFunc {
+ name := v.tagNameFunc(fld)
+ if len(name) > 0 {
+ customName = name
+ }
+ }
+
+ // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different
+ // and so only struct level caching can be used instead of combined with Field tag caching
+
+ if len(tag) > 0 {
+ ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
+ } else {
+ // even if field doesn't have validations need cTag for traversing to potential inner/nested
+ // elements of the field.
+ ctag = new(cTag)
+ }
+
+ cs.fields = append(cs.fields, &cField{
+ idx: i,
+ name: fld.Name,
+ altName: customName,
+ cTags: ctag,
+ namesEqual: fld.Name == customName,
+ })
+ }
+ v.structCache.Set(typ, cs)
+ return cs
+}
+
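+// parseFieldTagsRecursive turns a raw validation tag string into a linked list
+// of cTag entries, expanding registered aliases and handling special tags such
+// as dive, keys/endkeys, omitempty, structonly and nostructlevel along the way.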
+func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
+ var t string
+ noAlias := len(alias) == 0
+ tags := strings.Split(tag, tagSeparator)
+
+ for i := 0; i < len(tags); i++ {
+ t = tags[i]
+ if noAlias {
+ alias = t
+ }
+
+ // check map for alias and process new tags, otherwise process as usual
+ if tagsVal, found := v.aliases[t]; found {
+ if i == 0 {
+ firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+ } else {
+ next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+ current.next, current = next, curr
+
+ }
+ continue
+ }
+
+ var prevTag tagType
+
+ if i == 0 {
+ current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault}
+ firstCtag = current
+ } else {
+ prevTag = current.typeof
+ current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
+ current = current.next
+ }
+
+ switch t {
+ case diveTag:
+ current.typeof = typeDive
+ continue
+
+ case keysTag:
+ current.typeof = typeKeys
+
+ if i == 0 || prevTag != typeDive {
+ panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
+ }
+
+ // need to pass along only keys tag
+ // need to increment i to skip over the keys tags
+ b := make([]byte, 0, 64)
+
+ i++
+
+ for ; i < len(tags); i++ {
+
+ b = append(b, tags[i]...)
+ b = append(b, ',')
+
+ if tags[i] == endKeysTag {
+ break
+ }
+ }
+
+ current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
+ continue
+
+ case endKeysTag:
+ current.typeof = typeEndKeys
+
+			// if this is not the last tag, the 'endkeys' tag was encountered without a
+			// corresponding 'keys' tag and an error should be thrown
+ if i != len(tags)-1 {
+ panic(keysTagNotDefined)
+ }
+ return
+
+ case omitempty:
+ current.typeof = typeOmitEmpty
+ continue
+
+ case structOnlyTag:
+ current.typeof = typeStructOnly
+ continue
+
+ case noStructLevelTag:
+ current.typeof = typeNoStructLevel
+ continue
+
+ default:
+ if t == isdefault {
+ current.typeof = typeIsDefault
+ }
+ // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
+ orVals := strings.Split(t, orSeparator)
+
+ for j := 0; j < len(orVals); j++ {
+ vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
+ if noAlias {
+ alias = vals[0]
+ current.aliasTag = alias
+ } else {
+ current.actualAliasTag = t
+ }
+
+ if j > 0 {
+ current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
+ current = current.next
+ }
+ current.hasParam = len(vals) > 1
+
+ current.tag = vals[0]
+ if len(current.tag) == 0 {
+ panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
+ }
+
+ if wrapper, ok := v.validations[current.tag]; ok {
+ current.fn = wrapper.fn
+ current.runValidationWhenNil = wrapper.runValidatinOnNil
+ } else {
+ panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
+ }
+
+ if len(orVals) > 1 {
+ current.typeof = typeOr
+ }
+
+ if len(vals) > 1 {
+ current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
+ }
+ }
+ current.isBlockEnd = true
+ }
+ }
+ return
+}
+
+func (v *Validate) fetchCacheTag(tag string) *cTag {
+ // find cached tag
+ ctag, found := v.tagCache.Get(tag)
+ if !found {
+ v.tagCache.lock.Lock()
+ defer v.tagCache.lock.Unlock()
+
+		// multiple goroutines may have been waiting to parse this tag, but once the first
+		// one finishes, this check ensures the tag isn't parsed again.
+ ctag, found = v.tagCache.Get(tag)
+ if !found {
+ ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
+ v.tagCache.Set(tag, ctag)
+ }
+ }
+ return ctag
+}
diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go
new file mode 100644
index 0000000..ef81ead
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/country_codes.go
@@ -0,0 +1,162 @@
+package validator
+
+var iso3166_1_alpha2 = map[string]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true,
+ "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true,
+ "AR": true, "AM": true, "AW": true, "AU": true, "AT": true,
+ "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true,
+ "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true,
+ "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true,
+ "BV": true, "BR": true, "IO": true, "BN": true, "BG": true,
+ "BF": true, "BI": true, "KH": true, "CM": true, "CA": true,
+ "CV": true, "KY": true, "CF": true, "TD": true, "CL": true,
+ "CN": true, "CX": true, "CC": true, "CO": true, "KM": true,
+ "CG": true, "CD": true, "CK": true, "CR": true, "CI": true,
+ "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true,
+ "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true,
+ "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true,
+ "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true,
+ "FR": true, "GF": true, "PF": true, "TF": true, "GA": true,
+ "GM": true, "GE": true, "DE": true, "GH": true, "GI": true,
+ "GR": true, "GL": true, "GD": true, "GP": true, "GU": true,
+ "GT": true, "GG": true, "GN": true, "GW": true, "GY": true,
+ "HT": true, "HM": true, "VA": true, "HN": true, "HK": true,
+ "HU": true, "IS": true, "IN": true, "ID": true, "IR": true,
+ "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true,
+ "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true,
+ "KE": true, "KI": true, "KP": true, "KR": true, "KW": true,
+ "KG": true, "LA": true, "LV": true, "LB": true, "LS": true,
+ "LR": true, "LY": true, "LI": true, "LT": true, "LU": true,
+ "MO": true, "MK": true, "MG": true, "MW": true, "MY": true,
+ "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true,
+ "MR": true, "MU": true, "YT": true, "MX": true, "FM": true,
+ "MD": true, "MC": true, "MN": true, "ME": true, "MS": true,
+ "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true,
+ "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true,
+ "NE": true, "NG": true, "NU": true, "NF": true, "MP": true,
+ "NO": true, "OM": true, "PK": true, "PW": true, "PS": true,
+ "PA": true, "PG": true, "PY": true, "PE": true, "PH": true,
+ "PN": true, "PL": true, "PT": true, "PR": true, "QA": true,
+ "RE": true, "RO": true, "RU": true, "RW": true, "BL": true,
+ "SH": true, "KN": true, "LC": true, "MF": true, "PM": true,
+ "VC": true, "WS": true, "SM": true, "ST": true, "SA": true,
+ "SN": true, "RS": true, "SC": true, "SL": true, "SG": true,
+ "SX": true, "SK": true, "SI": true, "SB": true, "SO": true,
+ "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true,
+ "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true,
+ "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true,
+ "TH": true, "TL": true, "TG": true, "TK": true, "TO": true,
+ "TT": true, "TN": true, "TR": true, "TM": true, "TC": true,
+ "TV": true, "UG": true, "UA": true, "AE": true, "GB": true,
+ "US": true, "UM": true, "UY": true, "UZ": true, "VU": true,
+ "VE": true, "VN": true, "VG": true, "VI": true, "WF": true,
+ "EH": true, "YE": true, "ZM": true, "ZW": true,
+}
+
+var iso3166_1_alpha3 = map[string]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true,
+ "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true,
+ "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true,
+ "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true,
+ "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true,
+ "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true,
+ "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true,
+ "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true,
+ "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true,
+ "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true,
+ "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true,
+ "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true,
+ "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true,
+ "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true,
+ "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true,
+ "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true,
+ "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true,
+ "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true,
+ "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true,
+ "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true,
+ "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true,
+ "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true,
+ "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true,
+ "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true,
+ "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true,
+ "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true,
+ "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true,
+ "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true,
+ "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true,
+ "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true,
+ "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true,
+ "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true,
+ "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true,
+ "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true,
+ "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true,
+ "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true,
+ "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true,
+ "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true,
+ "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true,
+ "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true,
+ "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true,
+ "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true,
+ "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true,
+ "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true,
+ "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true,
+ "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true,
+ "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true,
+ "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true,
+ "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true,
+ "YEM": true, "ZMB": true, "ZWE": true, "ALA": true,
+}
+var iso3166_1_alpha_numeric = map[int]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ 4: true, 8: true, 12: true, 16: true, 20: true,
+ 24: true, 660: true, 10: true, 28: true, 32: true,
+ 51: true, 533: true, 36: true, 40: true, 31: true,
+ 44: true, 48: true, 50: true, 52: true, 112: true,
+ 56: true, 84: true, 204: true, 60: true, 64: true,
+ 68: true, 535: true, 70: true, 72: true, 74: true,
+ 76: true, 86: true, 96: true, 100: true, 854: true,
+ 108: true, 132: true, 116: true, 120: true, 124: true,
+ 136: true, 140: true, 148: true, 152: true, 156: true,
+ 162: true, 166: true, 170: true, 174: true, 180: true,
+ 178: true, 184: true, 188: true, 191: true, 192: true,
+ 531: true, 196: true, 203: true, 384: true, 208: true,
+ 262: true, 212: true, 214: true, 218: true, 818: true,
+ 222: true, 226: true, 232: true, 233: true, 748: true,
+ 231: true, 238: true, 234: true, 242: true, 246: true,
+ 250: true, 254: true, 258: true, 260: true, 266: true,
+ 270: true, 268: true, 276: true, 288: true, 292: true,
+ 300: true, 304: true, 308: true, 312: true, 316: true,
+ 320: true, 831: true, 324: true, 624: true, 328: true,
+ 332: true, 334: true, 336: true, 340: true, 344: true,
+ 348: true, 352: true, 356: true, 360: true, 364: true,
+ 368: true, 372: true, 833: true, 376: true, 380: true,
+ 388: true, 392: true, 832: true, 400: true, 398: true,
+ 404: true, 296: true, 408: true, 410: true, 414: true,
+ 417: true, 418: true, 428: true, 422: true, 426: true,
+ 430: true, 434: true, 438: true, 440: true, 442: true,
+ 446: true, 450: true, 454: true, 458: true, 462: true,
+ 466: true, 470: true, 584: true, 474: true, 478: true,
+ 480: true, 175: true, 484: true, 583: true, 498: true,
+ 492: true, 496: true, 499: true, 500: true, 504: true,
+ 508: true, 104: true, 516: true, 520: true, 524: true,
+ 528: true, 540: true, 554: true, 558: true, 562: true,
+ 566: true, 570: true, 574: true, 807: true, 580: true,
+ 578: true, 512: true, 586: true, 585: true, 275: true,
+ 591: true, 598: true, 600: true, 604: true, 608: true,
+ 612: true, 616: true, 620: true, 630: true, 634: true,
+ 642: true, 643: true, 646: true, 638: true, 652: true,
+ 654: true, 659: true, 662: true, 663: true, 666: true,
+ 670: true, 882: true, 674: true, 678: true, 682: true,
+ 686: true, 688: true, 690: true, 694: true, 702: true,
+ 534: true, 703: true, 705: true, 90: true, 706: true,
+ 710: true, 239: true, 728: true, 724: true, 144: true,
+ 729: true, 740: true, 744: true, 752: true, 756: true,
+ 760: true, 158: true, 762: true, 834: true, 764: true,
+ 626: true, 768: true, 772: true, 776: true, 780: true,
+ 788: true, 792: true, 795: true, 796: true, 798: true,
+ 800: true, 804: true, 784: true, 826: true, 581: true,
+ 840: true, 858: true, 860: true, 548: true, 862: true,
+ 704: true, 92: true, 850: true, 876: true, 732: true,
+ 887: true, 894: true, 716: true, 248: true,
+}
diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go
new file mode 100644
index 0000000..eafad0d
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/doc.go
@@ -0,0 +1,1322 @@
+/*
+Package validator implements value validations for structs and individual fields
+based on tags.
+
+It can also handle Cross-Field and Cross-Struct validation for nested structs
+and has the ability to dive into arrays and maps of any type.
+
+see more examples https://github.com/go-playground/validator/tree/master/_examples
+
+Validation Functions Return Type error
+
+Doing things this way is actually how the standard library does it; see the
+os.Open function here:
+
+ https://golang.org/pkg/os/#Open.
+
+The authors return type "error" to avoid the issue discussed in the following,
+where err is always != nil:
+
+ http://stackoverflow.com/a/29138676/3158232
+ https://github.com/go-playground/validator/issues/134
+
+Validator returns only InvalidValidationError for bad validation input, nil or
+ValidationErrors as type error; so, in your code all you need to do is check
+whether the returned error is not nil and, if it is not, check whether it is an
+InvalidValidationError (if necessary; most of the time it isn't), then type cast
+it to type ValidationErrors like so: err.(validator.ValidationErrors).
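+
+For example, a minimal error-handling sketch (myStruct is a placeholder for a
+value of your own type):
+
+	err := validate.Struct(myStruct)
+	if err != nil {
+
+		if _, ok := err.(*validator.InvalidValidationError); ok {
+			// the input itself was not validatable, e.g. a nil value
+			fmt.Println(err)
+			return
+		}
+
+		for _, fieldErr := range err.(validator.ValidationErrors) {
+			fmt.Println(fieldErr.Namespace(), fieldErr.Tag())
+		}
+	}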
+
+Custom Validation Functions
+
+Custom Validation functions can be added. Example:
+
+ // Structure
+ func customFunc(fl validator.FieldLevel) bool {
+
+ if fl.Field().String() == "invalid" {
+ return false
+ }
+
+ return true
+ }
+
+ validate.RegisterValidation("custom tag name", customFunc)
+ // NOTES: using the same tag name as an existing function
+ // will overwrite the existing one
+
+Cross-Field Validation
+
+Cross-Field Validation can be done via the following tags:
+ - eqfield
+ - nefield
+ - gtfield
+ - gtefield
+ - ltfield
+ - ltefield
+ - eqcsfield
+ - necsfield
+ - gtcsfield
+ - gtecsfield
+ - ltcsfield
+ - ltecsfield
+
+If, however, some custom cross-field validation is required, it can be done
+using a custom validation.
+
+Why not just have cross-fields validation tags (i.e. only eqcsfield and not
+eqfield)?
+
+The reason is efficiency. If you want to check a field within the same struct
+"eqfield" only has to find the field on the same struct (1 level). But, if we
+used "eqcsfield" it could be multiple levels down. Example:
+
+ type Inner struct {
+ StartDate time.Time
+ }
+
+ type Outer struct {
+ InnerStructField *Inner
+ CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
+ }
+
+ now := time.Now()
+
+ inner := &Inner{
+ StartDate: now,
+ }
+
+ outer := &Outer{
+ InnerStructField: inner,
+ CreatedAt: now,
+ }
+
+ errs := validate.Struct(outer)
+
+ // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
+ // into the function
+ // when calling validate.VarWithValue(val, field, tag) val will be
+ // whatever you pass, struct, field...
+ // when calling validate.Field(field, tag) val will be nil
+
+Multiple Validators
+
+Multiple validators on a field will process in the order defined. Example:
+
+ type Test struct {
+ Field `validate:"max=10,min=1"`
+ }
+
+ // max will be checked then min
+
+Bad Validator definitions are not handled by the library. Example:
+
+ type Test struct {
+ Field `validate:"min=10,max=0"`
+ }
+
+ // this definition of min max will never succeed
+
+Using Validator Tags
+
+Baked In Cross-Field validation only compares fields on the same struct.
+If Cross-Field + Cross-Struct validation is needed you should implement your
+own custom validator.
+
+Comma (",") is the default separator of validation tags. If you wish to
+have a comma included within the parameter (i.e. excludesall=,) you will need to
+use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
+so the above will become excludesall=0x2C.
+
+ type Test struct {
+ Field `validate:"excludesall=,"` // BAD! Do not include a comma.
+ Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+Pipe ("|") is the 'or' validation tags deparator. If you wish to
+have a pipe included within the parameter i.e. excludesall=| you will need to
+use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
+so the above will become excludesall=0x7C
+
+ type Test struct {
+		Field `validate:"excludesall=|"`    // BAD! Do not include a pipe!
+ Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+
+Baked In Validators and Tags
+
+Here is a list of the current built in validators:
+
+
+Skip Field
+
+Tells the validation to skip this struct field; this is particularly
+handy in ignoring embedded structs from being validated.
+
+	Usage: -
+
+
+Or Operator
+
+This is the 'or' operator, allowing multiple validators to be used and
+accepted, e.g. rgb|rgba would allow either an rgb or an rgba
+color to be accepted. This can also be combined with 'and', for example:
+omitempty,rgb|rgba
+
+ Usage: |
+
+StructOnly
+
+When a field that is a nested struct is encountered and contains this flag,
+any validation on the nested struct will be run, but none of the nested
+struct fields will be validated. This is useful if inside of your program
+you know the struct will be valid, but need to verify it has been assigned.
+NOTE: only "required" and "omitempty" can be used on a struct itself.
+
+ Usage: structonly
+
+NoStructLevel
+
+Same as structonly tag except that any struct level validations will not run.
+
+ Usage: nostructlevel
+
+Omit Empty
+
+Allows conditional validation, for example if a field is not set with
+a value (Determined by the "required" validator) then other validation
+such as min or max won't run, but if a value is set validation will run.
+
+ Usage: omitempty
+
+Dive
+
+This tells the validator to dive into a slice, array or map and validate that
+level of the slice, array or map with the validation tags that follow.
+Multidimensional nesting is also supported, each level you wish to dive will
+require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see
+the Keys & EndKeys section just below.
+
+ Usage: dive
+
+Example #1
+
+ [][]string with validation tag "gt=0,dive,len=1,dive,required"
+ // gt=0 will be applied to []
+ // len=1 will be applied to []string
+ // required will be applied to string
+
+Example #2
+
+ [][]string with validation tag "gt=0,dive,dive,required"
+ // gt=0 will be applied to []
+ // []string will be spared validation
+ // required will be applied to string
+
+Keys & EndKeys
+
+These are to be used together directly after the dive tag and tell the validator
+that anything between 'keys' and 'endkeys' applies to the keys of a map and not the
+values; think of it like the 'dive' tag, but for map keys instead of values.
+Multidimensional nesting is also supported; each level you wish to validate will
+require another 'keys' and 'endkeys' tag. These tags are only valid for maps.
+
+ Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags
+
+Example #1
+
+	map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required"
+	// gt=0 will be applied to the map itself
+	// eq=1|eq=2 will be applied to the map keys
+	// required will be applied to map values
+
+Example #2
+
+	map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
+	// gt=0 will be applied to the map itself
+	// eq=1|eq=2 will be applied to each array element in the map keys
+	// required will be applied to map values
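+
+For illustration, a sketch of these tags on a struct field (the struct, field
+and key/value rules are hypothetical):
+
+	type Config struct {
+		// every key must be a valid hostname, every value must be non-empty
+		Endpoints map[string]string `validate:"gt=0,dive,keys,hostname,endkeys,required"`
+	}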
+
+Required
+
+This validates that the value is not the data type's default zero value.
+For numbers ensures value is not zero. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required
+
+Required If
+
+The field under validation must be present and not empty only if all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil.
+
+ Usage: required_if
+
+Examples:
+
+ // require the field if the Field1 is equal to the parameter given:
+ Usage: required_if=Field1 foobar
+
+	// require the field if the Field1 and Field2 are equal to the values respectively:
+ Usage: required_if=Field1 foo Field2 bar
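+
+For illustration, a sketch of required_if on a struct (the struct and field
+names are hypothetical):
+
+	type Order struct {
+		ShippingMethod string `validate:"required"`
+		// CourierName is required only when ShippingMethod is "courier"
+		CourierName string `validate:"required_if=ShippingMethod courier"`
+	}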
+
+Required Unless
+
+The field under validation must be present and not empty unless all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil.
+
+ Usage: required_unless
+
+Examples:
+
+ // require the field unless the Field1 is equal to the parameter given:
+ Usage: required_unless=Field1 foobar
+
+	// require the field unless the Field1 and Field2 are equal to the values respectively:
+ Usage: required_unless=Field1 foo Field2 bar
+
+Required With
+
+The field under validation must be present and not empty only if any
+of the other specified fields are present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_with
+
+Examples:
+
+ // require the field if the Field1 is present:
+ Usage: required_with=Field1
+
+ // require the field if the Field1 or Field2 is present:
+ Usage: required_with=Field1 Field2
+
+Required With All
+
+The field under validation must be present and not empty only if all
+of the other specified fields are present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_with_all
+
+Example:
+
+	// require the field if the Field1 and Field2 are present:
+ Usage: required_with_all=Field1 Field2
+
+Required Without
+
+The field under validation must be present and not empty only when any
+of the other specified fields are not present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_without
+
+Examples:
+
+ // require the field if the Field1 is not present:
+ Usage: required_without=Field1
+
+ // require the field if the Field1 or Field2 is not present:
+ Usage: required_without=Field1 Field2
+
+Required Without All
+
+The field under validation must be present and not empty only when all
+of the other specified fields are not present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_without_all
+
+Example:
+
+	// require the field if the Field1 and Field2 are not present:
+ Usage: required_without_all=Field1 Field2
+
+Is Default
+
+This validates that the value is the default value and is almost the
+opposite of required.
+
+ Usage: isdefault
+
+Length
+
+For numbers, length will ensure that the value is
+equal to the parameter given. For strings, it checks that
+the string length is exactly that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: len=10
+
+Example #2 (time.Duration)
+
+For time.Duration, len will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: len=1h30m
+
+Maximum
+
+For numbers, max will ensure that the value is
+less than or equal to the parameter given. For strings, it checks
+that the string length is at most that number of characters. For
+slices, arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: max=10
+
+Example #2 (time.Duration)
+
+For time.Duration, max will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: max=1h30m
+
+Minimum
+
+For numbers, min will ensure that the value is
+greater or equal to the parameter given. For strings, it checks that
+the string length is at least that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: min=10
+
+Example #2 (time.Duration)
+
+For time.Duration, min will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: min=1h30m
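+
+For illustration, a sketch combining min and max on a struct (the struct and
+limits are hypothetical):
+
+	type Profile struct {
+		// Username must be between 3 and 32 characters long
+		Username string `validate:"min=3,max=32"`
+		// Tags may contain at most 5 items
+		Tags []string `validate:"max=5"`
+	}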
+
+Equals
+
+For strings & numbers, eq will ensure that the value is
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+Example #1
+
+ Usage: eq=10
+
+Example #2 (time.Duration)
+
+For time.Duration, eq will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: eq=1h30m
+
+Not Equal
+
+For strings & numbers, ne will ensure that the value is not
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+Example #1
+
+ Usage: ne=10
+
+Example #2 (time.Duration)
+
+For time.Duration, ne will ensure that the value is not equal to the duration
+given in the parameter.
+
+ Usage: ne=1h30m
+
+One Of
+
+For strings, ints, and uints, oneof will ensure that the value
+is one of the values in the parameter. The parameter should be
+a list of values separated by whitespace. Values may be
+strings or numbers. To match strings with spaces in them, include
+the target string between single quotes.
+
+ Usage: oneof=red green
+ oneof='red green' 'blue yellow'
+ oneof=5 7 9
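+
+For illustration, a sketch of oneof on a struct (the struct and values are
+hypothetical):
+
+	type Shirt struct {
+		// Size must be exactly one of the listed values
+		Size string `validate:"oneof=small medium large"`
+	}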
+
+Greater Than
+
+For numbers, this will ensure that the value is greater than the
+parameter given. For strings, it checks that the string length
+is greater than that number of characters. For slices, arrays
+and maps it validates the number of items.
+
+Example #1
+
+ Usage: gt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than time.Now.UTC().
+
+ Usage: gt
+
+Example #3 (time.Duration)
+
+For time.Duration, gt will ensure that the value is greater than the duration
+given in the parameter.
+
+ Usage: gt=1h30m
+
+Greater Than or Equal
+
+Same as 'min' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+ Usage: gte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than or equal to time.Now.UTC().
+
+ Usage: gte
+
+Example #3 (time.Duration)
+
+For time.Duration, gte will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: gte=1h30m
+
+Less Than
+
+For numbers, this will ensure that the value is less than the parameter given.
+For strings, it checks that the string length is less than that number of
+characters. For slices, arrays, and maps it validates the number of items.
+
+Example #1
+
+ Usage: lt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than time.Now.UTC().
+
+ Usage: lt
+
+Example #3 (time.Duration)
+
+For time.Duration, lt will ensure that the value is less than the duration given
+in the parameter.
+
+ Usage: lt=1h30m
+
+Less Than or Equal
+
+Same as 'max' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+ Usage: lte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than or equal to time.Now.UTC().
+
+ Usage: lte
+
+Example #3 (time.Duration)
+
+For time.Duration, lte will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: lte=1h30m
+
+Field Equals Another Field
+
+This will validate the field value against another field's value either within
+a struct or a passed-in field.
+
+Example #1:
+
+ // Validation on Password field using:
+ Usage: eqfield=ConfirmPassword
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(password, confirmpassword, "eqfield")
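+
+For illustration, a sketch of eqfield on a struct (the struct and field names
+are hypothetical):
+
+	type RegisterForm struct {
+		Password string `validate:"required,min=8"`
+		// ConfirmPassword must match Password exactly
+		ConfirmPassword string `validate:"required,eqfield=Password"`
+	}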
+
+Field Equals Another Field (relative)
+
+This does the same as eqfield except that it validates the field provided relative
+to the top level struct.
+
+	Usage: eqcsfield=InnerStructField.Field
+
+Field Does Not Equal Another Field
+
+This will validate the field value against another field's value either within
+a struct or a passed-in field.
+
+Examples:
+
+ // Confirm two colors are not the same:
+ //
+ // Validation on Color field:
+ Usage: nefield=Color2
+
+ // Validating by field:
+ validate.VarWithValue(color1, color2, "nefield")
+
+Field Does Not Equal Another Field (relative)
+
+This does the same as nefield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: necsfield=InnerStructField.Field
+
+Field Greater Than Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or a
+passed-in field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+	// Validation on End field using:
+	Usage: gtfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "gtfield")
+
+Field Greater Than Another Relative Field
+
+This does the same as gtfield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: gtcsfield=InnerStructField.Field
+
+Field Greater Than or Equal To Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or a
+passed-in field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+	// Validation on End field using:
+	Usage: gtefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "gtefield")
+
+Field Greater Than or Equal To Another Relative Field
+
+This does the same as gtefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: gtecsfield=InnerStructField.Field
+
+Less Than Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or a
+passed-in field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+	// Validation on End field using:
+	Usage: ltfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "ltfield")
+
+Less Than Another Relative Field
+
+This does the same as ltfield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltcsfield=InnerStructField.Field
+
+Less Than or Equal To Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or a
+passed-in field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+	// Validation on End field using:
+	Usage: ltefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "ltefield")
+
+Less Than or Equal To Another Relative Field
+
+This does the same as ltefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltecsfield=InnerStructField.Field
+
+Field Contains Another Field
+
+This does the same as contains except for struct fields. It should only be used
+with string types. See reflect.Value.String() for its behavior on other types.
+
+ Usage: containsfield=InnerStructField.Field
+
+Field Excludes Another Field
+
+This does the same as excludes except for struct fields. It should only be used
+with string types. See reflect.Value.String() for its behavior on other types.
+
+ Usage: excludesfield=InnerStructField.Field
+
+Unique
+
+For arrays & slices, unique will ensure that there are no duplicates.
+For maps, unique will ensure that there are no duplicate values.
+For slices of struct, unique will ensure that there are no duplicate values
+in a field of the struct specified via a parameter.
+
+ // For arrays, slices, and maps:
+ Usage: unique
+
+ // For slices of struct:
+ Usage: unique=field
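+
+For illustration, a sketch of unique=field on a slice of structs (the structs
+and field names are hypothetical):
+
+	type Member struct {
+		Name string `validate:"required"`
+	}
+
+	type Org struct {
+		// no two members may share the same Name
+		Members []Member `validate:"unique=Name"`
+	}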
+
+Alpha Only
+
+This validates that a string value contains ASCII alpha characters only
+
+ Usage: alpha
+
+Alphanumeric
+
+This validates that a string value contains ASCII alphanumeric characters only
+
+ Usage: alphanum
+
+Alpha Unicode
+
+This validates that a string value contains unicode alpha characters only
+
+ Usage: alphaunicode
+
+Alphanumeric Unicode
+
+This validates that a string value contains unicode alphanumeric characters only
+
+ Usage: alphanumunicode
+
+Number
+
+This validates that a string value contains number values only.
+For integers or floats it returns true.
+
+ Usage: number
+
+Numeric
+
+This validates that a string value contains a basic numeric value;
+basic excludes exponents etc...
+For integers or floats it returns true.
+
+ Usage: numeric
+
+Hexadecimal String
+
+This validates that a string value contains a valid hexadecimal.
+
+ Usage: hexadecimal
+
+Hexcolor String
+
+This validates that a string value contains a valid hex color including
+hashtag (#)
+
+ Usage: hexcolor
+
+Lowercase String
+
+This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string.
+
+ Usage: lowercase
+
+Uppercase String
+
+This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string.
+
+ Usage: uppercase
+
+RGB String
+
+This validates that a string value contains a valid rgb color
+
+ Usage: rgb
+
+RGBA String
+
+This validates that a string value contains a valid rgba color
+
+ Usage: rgba
+
+HSL String
+
+This validates that a string value contains a valid hsl color
+
+ Usage: hsl
+
+HSLA String
+
+This validates that a string value contains a valid hsla color
+
+ Usage: hsla
+
+E.164 Phone Number String
+
+This validates that a string value contains a valid E.164 Phone number
+https://en.wikipedia.org/wiki/E.164 (ex. +1123456789)
+
+ Usage: e164
+
+E-mail String
+
+This validates that a string value contains a valid email address.
+This may not conform to all possibilities of any RFC standard, but neither
+does any email provider accept all possibilities.
+
+ Usage: email
+
+JSON String
+
+This validates that a string value is valid JSON
+
+ Usage: json
+
+File path
+
+This validates that a string value contains a valid file path and that
+the file exists on the machine.
+This is done using os.Stat, which is a platform independent function.
+
+ Usage: file
+
+URL String
+
+This validates that a string value contains a valid URL.
+This will accept any URL that Go's request URI parsing accepts, but it must
+contain a scheme, for example http:// or rtmp://.
+
+ Usage: url
+
+URI String
+
+This validates that a string value contains a valid URI.
+This will accept any URI that Go's request URI parsing accepts.
+
+ Usage: uri
+
+Urn RFC 2141 String
+
+This validates that a string value contains a valid URN
+according to the RFC 2141 spec.
+
+ Usage: urn_rfc2141
+
+Base64 String
+
+This validates that a string value contains a valid base64 value.
+Although an empty string is valid base64 this will report an empty string
+as an error, if you wish to accept an empty string as valid you can use
+this with the omitempty tag.
+
+ Usage: base64
+
+Base64URL String
+
+This validates that a string value contains a valid base64 URL safe value
+according to the RFC 4648 spec.
+Although an empty string is a valid base64 URL safe value, this will report
+an empty string as an error, if you wish to accept an empty string as valid
+you can use this with the omitempty tag.
+
+ Usage: base64url
+
+Bitcoin Address
+
+This validates that a string value contains a valid bitcoin address.
+The format of the string is checked to ensure it matches one of the formats
+P2PKH or P2SH, and checksum validation is performed.
+
+ Usage: btc_addr
+
+Bitcoin Bech32 Address (segwit)
+
+This validates that a string value contains a valid bitcoin Bech32 address as defined
+by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki)
+Special thanks to Pieter Wuille for providing reference implementations.
+
+ Usage: btc_addr_bech32
+
+Ethereum Address
+
+This validates that a string value contains a valid ethereum address.
+The format of the string is checked to ensure it matches the standard Ethereum address format.
+
+ Usage: eth_addr
+
+Contains
+
+This validates that a string value contains the substring value.
+
+ Usage: contains=@
+
+Contains Any
+
+This validates that a string value contains any Unicode code points
+in the substring value.
+
+ Usage: containsany=!@#?
+
+Contains Rune
+
+This validates that a string value contains the supplied rune value.
+
+ Usage: containsrune=@
+
+Excludes
+
+This validates that a string value does not contain the substring value.
+
+ Usage: excludes=@
+
+Excludes All
+
+This validates that a string value does not contain any Unicode code
+points in the substring value.
+
+ Usage: excludesall=!@#?
+
+Excludes Rune
+
+This validates that a string value does not contain the supplied rune value.
+
+ Usage: excludesrune=@
+
+Starts With
+
+This validates that a string value starts with the supplied string value
+
+ Usage: startswith=hello
+
+Ends With
+
+This validates that a string value ends with the supplied string value
+
+ Usage: endswith=goodbye
+
+Does Not Start With
+
+This validates that a string value does not start with the supplied string value
+
+ Usage: startsnotwith=hello
+
+Does Not End With
+
+This validates that a string value does not end with the supplied string value
+
+ Usage: endsnotwith=goodbye
+
+International Standard Book Number
+
+This validates that a string value contains a valid isbn10 or isbn13 value.
+
+ Usage: isbn
+
+International Standard Book Number 10
+
+This validates that a string value contains a valid isbn10 value.
+
+ Usage: isbn10
+
+International Standard Book Number 13
+
+This validates that a string value contains a valid isbn13 value.
+
+ Usage: isbn13
+
+Universally Unique Identifier UUID
+
+This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead.
+
+ Usage: uuid
+
+Universally Unique Identifier UUID v3
+
+This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead.
+
+ Usage: uuid3
+
+Universally Unique Identifier UUID v4
+
+This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead.
+
+ Usage: uuid4
+
+Universally Unique Identifier UUID v5
+
+This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead.
+
+ Usage: uuid5
+
+ASCII
+
+This validates that a string value contains only ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: ascii
+
+Printable ASCII
+
+This validates that a string value contains only printable ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: printascii
+
+Multi-Byte Characters
+
+This validates that a string value contains one or more multibyte characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: multibyte
+
+Data URL
+
+This validates that a string value contains a valid DataURI.
+NOTE: this will also validate that the data portion is valid base64
+
+ Usage: datauri
+
+Latitude
+
+This validates that a string value contains a valid latitude.
+
+ Usage: latitude
+
+Longitude
+
+This validates that a string value contains a valid longitude.
+
+ Usage: longitude
+
+Social Security Number SSN
+
+This validates that a string value contains a valid U.S. Social Security Number.
+
+ Usage: ssn
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid IP Address.
+
+ Usage: ip
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid v4 IP Address.
+
+ Usage: ipv4
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid v6 IP Address.
+
+ Usage: ipv6
+
+Classless Inter-Domain Routing CIDR
+
+This validates that a string value contains a valid CIDR Address.
+
+ Usage: cidr
+
+Classless Inter-Domain Routing CIDRv4
+
+This validates that a string value contains a valid v4 CIDR Address.
+
+ Usage: cidrv4
+
+Classless Inter-Domain Routing CIDRv6
+
+This validates that a string value contains a valid v6 CIDR Address.
+
+ Usage: cidrv6
+
+Transmission Control Protocol Address TCP
+
+This validates that a string value contains a valid resolvable TCP Address.
+
+ Usage: tcp_addr
+
+Transmission Control Protocol Address TCPv4
+
+This validates that a string value contains a valid resolvable v4 TCP Address.
+
+ Usage: tcp4_addr
+
+Transmission Control Protocol Address TCPv6
+
+This validates that a string value contains a valid resolvable v6 TCP Address.
+
+ Usage: tcp6_addr
+
+User Datagram Protocol Address UDP
+
+This validates that a string value contains a valid resolvable UDP Address.
+
+ Usage: udp_addr
+
+User Datagram Protocol Address UDPv4
+
+This validates that a string value contains a valid resolvable v4 UDP Address.
+
+ Usage: udp4_addr
+
+User Datagram Protocol Address UDPv6
+
+This validates that a string value contains a valid resolvable v6 UDP Address.
+
+ Usage: udp6_addr
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid resolvable IP Address.
+
+ Usage: ip_addr
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid resolvable v4 IP Address.
+
+ Usage: ip4_addr
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid resolvable v6 IP Address.
+
+ Usage: ip6_addr
+
+Unix domain socket end point Address
+
+This validates that a string value contains a valid Unix Address.
+
+ Usage: unix_addr
+
+Media Access Control Address MAC
+
+This validates that a string value contains a valid MAC Address.
+
+ Usage: mac
+
+Note: See Go's ParseMAC for accepted formats and types:
+
+ http://golang.org/src/net/mac.go?s=866:918#L29
+
+Hostname RFC 952
+
+This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952
+
+ Usage: hostname
+
+Hostname RFC 1123
+
+This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123
+
+ Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias.
+
+Fully Qualified Domain Name (FQDN)
+
+This validates that a string value contains a valid FQDN.
+
+ Usage: fqdn
+
+HTML Tags
+
+This validates that a string value appears to be an HTML element tag
+including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element
+
+ Usage: html
+
+HTML Encoded
+
+This validates that a string value is a proper character reference in decimal
+or hexadecimal format
+
+ Usage: html_encoded
+
+URL Encoded
+
+This validates that a string value is percent-encoded (URL encoded) according
+to https://tools.ietf.org/html/rfc3986#section-2.1
+
+ Usage: url_encoded
+
+Directory
+
+This validates that a string value contains a valid directory and that
+it exists on the machine.
+This is done using os.Stat, which is a platform independent function.
+
+ Usage: dir
+
+HostPort
+
+This validates that a string value contains a valid DNS hostname and port that
+can be used to validate fields typically passed to sockets and connections.
+
+ Usage: hostname_port
+
+Datetime
+
+This validates that a string value is a valid datetime based on the supplied datetime format.
+Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/
+
+ Usage: datetime=2006-01-02
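+
+For illustration, a sketch of datetime on a struct (the struct, field and
+layout are hypothetical):
+
+	type Event struct {
+		// Date must match the reference layout, e.g. "2021-06-30"
+		Date string `validate:"datetime=2006-01-02"`
+	}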
+
+Iso3166-1 alpha-2
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha2
+
+Iso3166-1 alpha-3
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha3
+
+Iso3166-1 alpha-numeric
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+	Usage: iso3166_1_alpha_numeric
+
+BCP 47 Language Tag
+
+This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse.
+More information on https://pkg.go.dev/golang.org/x/text/language
+
+ Usage: bcp47_language_tag
+
+BIC (SWIFT code)
+
+This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362.
+More information on https://www.iso.org/standard/60390.html
+
+ Usage: bic
+
+TimeZone
+
+This validates that a string value is a valid time zone based on the time zone database present on the system.
+Although the empty value and the Local value are allowed by Go's time.LoadLocation function, they are not allowed by this validator.
+More information on https://golang.org/pkg/time/#LoadLocation
+
+ Usage: timezone
+
+
+Alias Validators and Tags
+
+NOTE: When returning an error, the tag returned in "FieldError" will be
+the alias tag unless the dive tag is part of the alias. Everything after the
+dive tag is not reported as the alias tag. Also, the "ActualTag" in the above
+case will be the actual tag within the alias that failed.
+
+Here is a list of the current built in alias tags:
+
+ "iscolor"
+ alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
+ "country_code"
+ alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code)
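+
+For illustration, a custom alias can be registered and used in the same way
+(the alias name "ipv4_or_6" is hypothetical):
+
+	validate := validator.New()
+	validate.RegisterAlias("ipv4_or_6", "ipv4|ipv6")
+
+	type Server struct {
+		Addr string `validate:"ipv4_or_6"`
+	}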
+
+Validator notes:
+
+ regex
+		a regex validator won't be added because commas and = signs can be part
+		of a regex, which conflicts with the validation definitions. Although
+		workarounds can be made, they take away from using pure regexes.
+		Furthermore, it's quick and dirty, but the regexes become harder to
+		maintain and are not reusable, so it's as much a programming philosophy
+		as anything.
+
+		In place of this, new validator functions should be created; a regex can
+		be used within the validator function and even be precompiled for better
+		efficiency within regexes.go.
+
+ And the best reason, you can submit a pull request and we can keep on
+ adding to the validation library of this package!
+
+Non standard validators
+
+A collection of validation rules that are frequently needed but are more
+complex than the ones found in the baked in validators.
+A non standard validator must be registered manually like you would
+with your own custom validation functions.
+
+Example of registration and use:
+
+ type Test struct {
+ TestField string `validate:"yourtag"`
+ }
+
+ t := &Test{
+		TestField: "Test",
+ }
+
+ validate := validator.New()
+ validate.RegisterValidation("yourtag", validators.NotBlank)
+
+Here is a list of the current non standard validators:
+
+ NotBlank
+ This validates that the value is not blank or with length zero.
+ For strings ensures they do not contain only spaces. For channels, maps, slices and arrays
+ ensures they don't have zero length. For others, a non empty value is required.
+
+ Usage: notblank
+
+Panics
+
+This package panics when bad input is provided; this is by design, as bad code
+like that should not make it to production.
+
+ type Test struct {
+ TestField string `validate:"nonexistantfunction=1"`
+ }
+
+ t := &Test{
+		TestField: "Test",
+ }
+
+ validate.Struct(t) // this will panic
+*/
+package validator
diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go
new file mode 100644
index 0000000..9a1b1ab
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/errors.go
@@ -0,0 +1,275 @@
+package validator
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+
+ ut "github.com/go-playground/universal-translator"
+)
+
+const (
+ fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
+)
+
+// ValidationErrorsTranslations is the translation return type
+type ValidationErrorsTranslations map[string]string
+
+// InvalidValidationError describes an invalid argument passed to
+// `Struct`, `StructExcept`, `StructPartial` or `Field`
+type InvalidValidationError struct {
+ Type reflect.Type
+}
+
+// Error returns InvalidValidationError message
+func (e *InvalidValidationError) Error() string {
+
+ if e.Type == nil {
+ return "validator: (nil)"
+ }
+
+ return "validator: (nil " + e.Type.String() + ")"
+}
+
+// ValidationErrors is an array of FieldError's
+// for use in custom error messages post validation.
+type ValidationErrors []FieldError
+
+// Error is intended for use in development + debugging and not intended to be a production error message.
+// It allows ValidationErrors to satisfy the error interface.
+// All information to create an error message specific to your application is contained within
+// the FieldError found within the ValidationErrors array
+func (ve ValidationErrors) Error() string {
+
+ buff := bytes.NewBufferString("")
+
+ var fe *fieldError
+
+ for i := 0; i < len(ve); i++ {
+
+ fe = ve[i].(*fieldError)
+ buff.WriteString(fe.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// Translate translates all of the ValidationErrors
+func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
+
+ trans := make(ValidationErrorsTranslations)
+
+ var fe *fieldError
+
+ for i := 0; i < len(ve); i++ {
+ fe = ve[i].(*fieldError)
+
+ // // in case an Anonymous struct was used, ensure that the key
+ // // would be 'Username' instead of ".Username"
+ // if len(fe.ns) > 0 && fe.ns[:1] == "." {
+ // trans[fe.ns[1:]] = fe.Translate(ut)
+ // continue
+ // }
+
+ trans[fe.ns] = fe.Translate(ut)
+ }
+
+ return trans
+}
+
+// FieldError contains all functions to get error details
+type FieldError interface {
+
+ // Tag returns the validation tag that failed. if the
+ // validation was an alias, this will return the
+ // alias name and not the underlying tag that failed.
+ //
+ // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
+ // will return "iscolor"
+ Tag() string
+
+ // ActualTag returns the validation tag that failed, even if an
+ // alias the actual tag within the alias will be returned.
+ // If an 'or' validation fails the entire or will be returned.
+ //
+ // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
+ // will return "hexcolor|rgb|rgba|hsl|hsla"
+ ActualTag() string
+
+ // Namespace returns the namespace for the field error, with the tag
+ // name taking precedence over the field's actual name.
+ //
+ // eg. JSON name "User.fname"
+ //
+ // See StructNamespace() for a version that returns actual names.
+ //
+ // NOTE: this field can be blank when validating a single primitive field
+	// using validate.Field(...) as there is no way to extract its name
+ Namespace() string
+
+ // StructNamespace returns the namespace for the field error, with the field's
+ // actual name.
+ //
+	// eg. "User.FirstName" see Namespace for comparison
+ //
+ // NOTE: this field can be blank when validating a single primitive field
+ // using validate.Field(...) as there is no way to extract its name
+ StructNamespace() string
+
+ // Field returns the fields name with the tag name taking precedence over the
+ // field's actual name.
+ //
+	// eg. JSON name "fname"
+ // see StructField for comparison
+ Field() string
+
+ // StructField returns the field's actual name from the struct, when able to determine.
+ //
+	// eg. "FirstName"
+ // see Field for comparison
+ StructField() string
+
+ // Value returns the actual field's value in case needed for creating the error
+ // message
+ Value() interface{}
+
+ // Param returns the param value, in string form for comparison; this will also
+ // help with generating an error message
+ Param() string
+
+ // Kind returns the Field's reflect Kind
+ //
+ // eg. time.Time's kind is a struct
+ Kind() reflect.Kind
+
+ // Type returns the Field's reflect Type
+ //
+ // eg. time.Time's type is time.Time
+ Type() reflect.Type
+
+ // Translate returns the FieldError's translated error
+ // from the provided 'ut.Translator' and registered 'TranslationFunc'
+ //
+ // NOTE: if no registered translator can be found it returns the same as
+ // calling fe.Error()
+ Translate(ut ut.Translator) string
+
+ // Error returns the FieldError's message
+ Error() string
+}
+
+// compile time interface checks
+var _ FieldError = new(fieldError)
+var _ error = new(fieldError)
+
+// fieldError contains a single field's validation error along
+// with other properties that may be needed for error message creation
+// it complies with the FieldError interface
+type fieldError struct {
+ v *Validate
+ tag string
+ actualTag string
+ ns string
+ structNs string
+ fieldLen uint8
+ structfieldLen uint8
+ value interface{}
+ param string
+ kind reflect.Kind
+ typ reflect.Type
+}
+
+// Tag returns the validation tag that failed.
+func (fe *fieldError) Tag() string {
+ return fe.tag
+}
+
+// ActualTag returns the validation tag that failed, even if an
+// alias the actual tag within the alias will be returned.
+func (fe *fieldError) ActualTag() string {
+ return fe.actualTag
+}
+
+// Namespace returns the namespace for the field error, with the tag
+// name taking precedence over the field's actual name.
+func (fe *fieldError) Namespace() string {
+ return fe.ns
+}
+
+// StructNamespace returns the namespace for the field error, with the field's
+// actual name.
+func (fe *fieldError) StructNamespace() string {
+ return fe.structNs
+}
+
+// Field returns the field's name with the tag name taking precedence over the
+// field's actual name.
+func (fe *fieldError) Field() string {
+
+ return fe.ns[len(fe.ns)-int(fe.fieldLen):]
+ // // return fe.field
+ // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):]
+
+ // log.Println("FLD:", fld)
+
+ // if len(fld) > 0 && fld[:1] == "." {
+ // return fld[1:]
+ // }
+
+ // return fld
+}
+
+// StructField returns the field's actual name from the struct, when able to determine.
+func (fe *fieldError) StructField() string {
+ // return fe.structField
+ return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
+}
+
+// Value returns the actual field's value in case needed for creating the error
+// message
+func (fe *fieldError) Value() interface{} {
+ return fe.value
+}
+
+// Param returns the param value, in string form for comparison; this will
+// also help with generating an error message
+func (fe *fieldError) Param() string {
+ return fe.param
+}
+
+// Kind returns the Field's reflect Kind
+func (fe *fieldError) Kind() reflect.Kind {
+ return fe.kind
+}
+
+// Type returns the Field's reflect Type
+func (fe *fieldError) Type() reflect.Type {
+ return fe.typ
+}
+
+// Error returns the fieldError's error message
+func (fe *fieldError) Error() string {
+ return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
+}
+
+// Translate returns the FieldError's translated error
+// from the provided 'ut.Translator' and registered 'TranslationFunc'
+//
+// NOTE: if no registered translation can be found, it returns the original
+// untranslated error message.
+func (fe *fieldError) Translate(ut ut.Translator) string {
+
+ m, ok := fe.v.transTagFunc[ut]
+ if !ok {
+ return fe.Error()
+ }
+
+ fn, ok := m[fe.tag]
+ if !ok {
+ return fe.Error()
+ }
+
+ return fn(ut, fe)
+}
diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go
new file mode 100644
index 0000000..ef35826
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/field_level.go
@@ -0,0 +1,120 @@
+package validator
+
+import "reflect"
+
+// FieldLevel contains all the information and helper functions
+// to validate a field
+type FieldLevel interface {
+
+ // Top returns the top level struct, if any
+ Top() reflect.Value
+
+ // Parent returns the current fields parent struct, if any or
+ // the comparison value if called 'VarWithValue'
+ Parent() reflect.Value
+
+ // Field returns current field for validation
+ Field() reflect.Value
+
+ // FieldName returns the field's name with the tag
+ // name taking precedence over the fields actual name.
+ FieldName() string
+
+ // StructFieldName returns the struct field's name
+ StructFieldName() string
+
+ // Param returns param for validation against current field
+ Param() string
+
+ // GetTag returns the current validations tag name
+ GetTag() string
+
+ // ExtractType gets the actual underlying type of field value.
+ // It will dive into pointers, customTypes and return you the
+ // underlying value and it's kind.
+ ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
+
+ // GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace
+	// in the param and returns the field, field kind and whether it was successful in retrieving
+ // the field at all.
+ //
+ // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
+ // could not be retrieved because it didn't exist.
+ //
+	// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
+ GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
+
+ // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+ // the field and namespace allowing more extensibility for validators.
+ //
+	// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
+ GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
+
+ // GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace
+	// in the param and returns the field, field kind, whether it's a nullable type and whether it was successful in retrieving
+ // the field at all.
+ //
+ // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
+ // could not be retrieved because it didn't exist.
+ GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
+
+ // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+ // the field and namespace allowing more extensibility for validators.
+ GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
+}
+
+var _ FieldLevel = new(validate)
+
+// Field returns current field for validation
+func (v *validate) Field() reflect.Value {
+ return v.flField
+}
+
+// FieldName returns the field's name with the tag
+// name taking precedence over the fields actual name.
+func (v *validate) FieldName() string {
+ return v.cf.altName
+}
+
+// GetTag returns the current validations tag name
+func (v *validate) GetTag() string {
+ return v.ct.tag
+}
+
+// StructFieldName returns the struct field's name
+func (v *validate) StructFieldName() string {
+ return v.cf.name
+}
+
+// Param returns param for validation against current field
+func (v *validate) Param() string {
+ return v.ct.param
+}
+
+// GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace
+// in the param and returns the field, field kind and whether it was successful in retrieving the field.
+//
+// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
+ current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+ return current, kind, found
+}
+
+// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+//
+// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
+ current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
+ return current, kind, found
+}
+
+// GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace
+// in the param and returns the field, field kind, whether it's a nullable type and whether it was successful.
+func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
+ return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+}
+
+// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
+ return v.getStructFieldOKInternal(val, namespace)
+}
diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod
new file mode 100644
index 0000000..53c4820
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/go.mod
@@ -0,0 +1,12 @@
+module github.com/go-playground/validator/v10
+
+go 1.13
+
+require (
+ github.com/go-playground/assert/v2 v2.0.1
+ github.com/go-playground/locales v0.13.0
+ github.com/go-playground/universal-translator v0.17.0
+ github.com/leodido/go-urn v1.2.0
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+ golang.org/x/text v0.3.2 // indirect
+)
diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum
new file mode 100644
index 0000000..4b00cf6
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/go.sum
@@ -0,0 +1,33 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-playground/validator/v10/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png
new file mode 100644
index 0000000..355000f
Binary files /dev/null and b/vendor/github.com/go-playground/validator/v10/logo.png differ
diff --git a/vendor/github.com/go-playground/validator/v10/postcode_regexes.go b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
new file mode 100644
index 0000000..e7e7b68
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
@@ -0,0 +1,173 @@
+package validator
+
+import "regexp"
+
+var postCodePatternDict = map[string]string{
+ "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`,
+ "JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "US": `^\d{5}([ \-]\d{4})?$`,
+ "CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`,
+ "DE": `^\d{5}$`,
+ "JP": `^\d{3}-\d{4}$`,
+ "FR": `^\d{2}[ ]?\d{3}$`,
+ "AU": `^\d{4}$`,
+ "IT": `^\d{5}$`,
+ "CH": `^\d{4}$`,
+ "AT": `^\d{4}$`,
+ "ES": `^\d{5}$`,
+ "NL": `^\d{4}[ ]?[A-Z]{2}$`,
+ "BE": `^\d{4}$`,
+ "DK": `^\d{4}$`,
+ "SE": `^\d{3}[ ]?\d{2}$`,
+ "NO": `^\d{4}$`,
+ "BR": `^\d{5}[\-]?\d{3}$`,
+ "PT": `^\d{4}([\-]\d{3})?$`,
+ "FI": `^\d{5}$`,
+ "AX": `^22\d{3}$`,
+ "KR": `^\d{3}[\-]\d{3}$`,
+ "CN": `^\d{6}$`,
+ "TW": `^\d{3}(\d{2})?$`,
+ "SG": `^\d{6}$`,
+ "DZ": `^\d{5}$`,
+ "AD": `^AD\d{3}$`,
+ "AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`,
+ "AM": `^(37)?\d{4}$`,
+ "AZ": `^\d{4}$`,
+ "BH": `^((1[0-2]|[2-9])\d{2})?$`,
+ "BD": `^\d{4}$`,
+ "BB": `^(BB\d{5})?$`,
+ "BY": `^\d{6}$`,
+ "BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`,
+ "BA": `^\d{5}$`,
+ "IO": `^BBND 1ZZ$`,
+ "BN": `^[A-Z]{2}[ ]?\d{4}$`,
+ "BG": `^\d{4}$`,
+ "KH": `^\d{5}$`,
+ "CV": `^\d{4}$`,
+ "CL": `^\d{7}$`,
+ "CR": `^\d{4,5}|\d{3}-\d{4}$`,
+ "HR": `^\d{5}$`,
+ "CY": `^\d{4}$`,
+ "CZ": `^\d{3}[ ]?\d{2}$`,
+ "DO": `^\d{5}$`,
+ "EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`,
+ "EG": `^\d{5}$`,
+ "EE": `^\d{5}$`,
+ "FO": `^\d{3}$`,
+ "GE": `^\d{4}$`,
+ "GR": `^\d{3}[ ]?\d{2}$`,
+ "GL": `^39\d{2}$`,
+ "GT": `^\d{5}$`,
+ "HT": `^\d{4}$`,
+ "HN": `^(?:\d{5})?$`,
+ "HU": `^\d{4}$`,
+ "IS": `^\d{3}$`,
+ "IN": `^\d{6}$`,
+ "ID": `^\d{5}$`,
+ "IL": `^\d{5}$`,
+ "JO": `^\d{5}$`,
+ "KZ": `^\d{6}$`,
+ "KE": `^\d{5}$`,
+ "KW": `^\d{5}$`,
+ "LA": `^\d{5}$`,
+ "LV": `^\d{4}$`,
+ "LB": `^(\d{4}([ ]?\d{4})?)?$`,
+ "LI": `^(948[5-9])|(949[0-7])$`,
+ "LT": `^\d{5}$`,
+ "LU": `^\d{4}$`,
+ "MK": `^\d{4}$`,
+ "MY": `^\d{5}$`,
+ "MV": `^\d{5}$`,
+ "MT": `^[A-Z]{3}[ ]?\d{2,4}$`,
+ "MU": `^(\d{3}[A-Z]{2}\d{3})?$`,
+ "MX": `^\d{5}$`,
+ "MD": `^\d{4}$`,
+ "MC": `^980\d{2}$`,
+ "MA": `^\d{5}$`,
+ "NP": `^\d{5}$`,
+ "NZ": `^\d{4}$`,
+ "NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`,
+ "NG": `^(\d{6})?$`,
+ "OM": `^(PC )?\d{3}$`,
+ "PK": `^\d{5}$`,
+ "PY": `^\d{4}$`,
+ "PH": `^\d{4}$`,
+ "PL": `^\d{2}-\d{3}$`,
+ "PR": `^00[679]\d{2}([ \-]\d{4})?$`,
+ "RO": `^\d{6}$`,
+ "RU": `^\d{6}$`,
+ "SM": `^4789\d$`,
+ "SA": `^\d{5}$`,
+ "SN": `^\d{5}$`,
+ "SK": `^\d{3}[ ]?\d{2}$`,
+ "SI": `^\d{4}$`,
+ "ZA": `^\d{4}$`,
+ "LK": `^\d{5}$`,
+ "TJ": `^\d{6}$`,
+ "TH": `^\d{5}$`,
+ "TN": `^\d{4}$`,
+ "TR": `^\d{5}$`,
+ "TM": `^\d{6}$`,
+ "UA": `^\d{5}$`,
+ "UY": `^\d{5}$`,
+ "UZ": `^\d{6}$`,
+ "VA": `^00120$`,
+ "VE": `^\d{4}$`,
+ "ZM": `^\d{5}$`,
+ "AS": `^96799$`,
+ "CC": `^6799$`,
+ "CK": `^\d{4}$`,
+ "RS": `^\d{6}$`,
+ "ME": `^8\d{4}$`,
+ "CS": `^\d{5}$`,
+ "YU": `^\d{5}$`,
+ "CX": `^6798$`,
+ "ET": `^\d{4}$`,
+ "FK": `^FIQQ 1ZZ$`,
+ "NF": `^2899$`,
+ "FM": `^(9694[1-4])([ \-]\d{4})?$`,
+ "GF": `^9[78]3\d{2}$`,
+ "GN": `^\d{3}$`,
+ "GP": `^9[78][01]\d{2}$`,
+ "GS": `^SIQQ 1ZZ$`,
+ "GU": `^969[123]\d([ \-]\d{4})?$`,
+ "GW": `^\d{4}$`,
+ "HM": `^\d{4}$`,
+ "IQ": `^\d{5}$`,
+ "KG": `^\d{6}$`,
+ "LR": `^\d{4}$`,
+ "LS": `^\d{3}$`,
+ "MG": `^\d{3}$`,
+ "MH": `^969[67]\d([ \-]\d{4})?$`,
+ "MN": `^\d{6}$`,
+ "MP": `^9695[012]([ \-]\d{4})?$`,
+ "MQ": `^9[78]2\d{2}$`,
+ "NC": `^988\d{2}$`,
+ "NE": `^\d{4}$`,
+ "VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
+ "VN": `^[0-9]{1,6}$`,
+ "PF": `^987\d{2}$`,
+ "PG": `^\d{3}$`,
+ "PM": `^9[78]5\d{2}$`,
+ "PN": `^PCRN 1ZZ$`,
+ "PW": `^96940$`,
+ "RE": `^9[78]4\d{2}$`,
+ "SH": `^(ASCN|STHL) 1ZZ$`,
+ "SJ": `^\d{4}$`,
+ "SO": `^\d{5}$`,
+ "SZ": `^[HLMS]\d{3}$`,
+ "TC": `^TKCA 1ZZ$`,
+ "WF": `^986\d{2}$`,
+ "XK": `^\d{5}$`,
+ "YT": `^976\d{2}$`,
+}
+
+var postCodeRegexDict = map[string]*regexp.Regexp{}
+
+func init() {
+ for countryCode, pattern := range postCodePatternDict {
+ postCodeRegexDict[countryCode] = regexp.MustCompile(pattern)
+ }
+}
diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go
new file mode 100644
index 0000000..ddcf785
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/regexes.go
@@ -0,0 +1,103 @@
+package validator
+
+import "regexp"
+
+const (
+ alphaRegexString = "^[a-zA-Z]+$"
+ alphaNumericRegexString = "^[a-zA-Z0-9]+$"
+ alphaUnicodeRegexString = "^[\\p{L}]+$"
+ alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
+ numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
+ numberRegexString = "^[0-9]+$"
+ hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
+ hexColorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
+ rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
+ hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
+ base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
+ iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
+ iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
+ uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
+ uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
+ uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
+ uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
+ aSCIIRegexString = "^[\x00-\x7F]*$"
+ printableASCIIRegexString = "^[\x20-\x7E]*$"
+ multibyteRegexString = "[^\x00-\x7F]"
+ dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)`
+ latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
+ hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
+ hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
+ fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
+ btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
+ btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
+ ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
+ ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
+ uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$`
+ hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(&gt)|(&lt)|(&quot)|(&amp)+[;]?`
+ hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
+ splitParamsRegexString = `'[^']*'|\S+`
+ bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$`
+)
+
+var (
+ alphaRegex = regexp.MustCompile(alphaRegexString)
+ alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
+ alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
+ alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
+ numericRegex = regexp.MustCompile(numericRegexString)
+ numberRegex = regexp.MustCompile(numberRegexString)
+ hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
+ hexColorRegex = regexp.MustCompile(hexColorRegexString)
+ rgbRegex = regexp.MustCompile(rgbRegexString)
+ rgbaRegex = regexp.MustCompile(rgbaRegexString)
+ hslRegex = regexp.MustCompile(hslRegexString)
+ hslaRegex = regexp.MustCompile(hslaRegexString)
+ e164Regex = regexp.MustCompile(e164RegexString)
+ emailRegex = regexp.MustCompile(emailRegexString)
+ base64Regex = regexp.MustCompile(base64RegexString)
+ base64URLRegex = regexp.MustCompile(base64URLRegexString)
+ iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
+ iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
+ uUID3Regex = regexp.MustCompile(uUID3RegexString)
+ uUID4Regex = regexp.MustCompile(uUID4RegexString)
+ uUID5Regex = regexp.MustCompile(uUID5RegexString)
+ uUIDRegex = regexp.MustCompile(uUIDRegexString)
+ uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
+ uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
+ uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
+ uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
+ aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
+ printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
+ multibyteRegex = regexp.MustCompile(multibyteRegexString)
+ dataURIRegex = regexp.MustCompile(dataURIRegexString)
+ latitudeRegex = regexp.MustCompile(latitudeRegexString)
+ longitudeRegex = regexp.MustCompile(longitudeRegexString)
+ sSNRegex = regexp.MustCompile(sSNRegexString)
+ hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
+ hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
+ fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
+ btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
+ btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
+ btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
+ ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
+ ethAddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString)
+ ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString)
+ uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
+ hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
+ hTMLRegex = regexp.MustCompile(hTMLRegexString)
+ splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
+ bicRegex = regexp.MustCompile(bicRegexString)
+)
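
These precompiled expressions are what the corresponding built-in tags (hexcolor, email, and so on) match against. A small sketch of how they surface through the public API; the tag names follow the upstream documentation rather than anything declared in this file:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	fmt.Println(v.Var("#1e90ff", "hexcolor"))       // <nil>: matches hexColorRegex
	fmt.Println(v.Var("not-a-color", "hexcolor"))   // ValidationErrors
	fmt.Println(v.Var("user@example.com", "email")) // <nil>: matches emailRegex
}
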
diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go
new file mode 100644
index 0000000..c0d89cf
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/struct_level.go
@@ -0,0 +1,175 @@
+package validator
+
+import (
+ "context"
+ "reflect"
+)
+
+// StructLevelFunc accepts all values needed for struct level validation
+type StructLevelFunc func(sl StructLevel)
+
+// StructLevelFuncCtx accepts all values needed for struct level validation
+// but also allows passing of contextual validation information via context.Context.
+type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
+
+// wrapStructLevelFunc wraps a normal StructLevelFunc to make it compatible with StructLevelFuncCtx
+func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
+ return func(ctx context.Context, sl StructLevel) {
+ fn(sl)
+ }
+}
+
+// StructLevel contains all the information and helper functions
+// to validate a struct
+type StructLevel interface {
+
+ // Validator returns the main validation object, in case one wants to call validations internally.
+ // this is so you don't have to use anonymous functions to get access to the validate
+ // instance.
+ Validator() *Validate
+
+ // Top returns the top level struct, if any
+ Top() reflect.Value
+
+ // Parent returns the current field's parent struct, if any
+ Parent() reflect.Value
+
+ // Current returns the current struct.
+ Current() reflect.Value
+
+ // ExtractType gets the actual underlying type of field value.
+ // It will dive into pointers, customTypes and return you the
+ // underlying value and its kind.
+ ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
+
+ // ReportError reports an error just by passing the field and tag information
+ //
+ // NOTES:
+ //
+ // fieldName and altName get appended to the existing namespace that
+ // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending
+ // on the nesting
+ //
+ // tag can be an existing validation tag or just something you make up
+ // and process on the flip side it's up to you.
+ ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
+
+ // ReportValidationErrors reports an error just by passing ValidationErrors
+ //
+ // NOTES:
+ //
+ // relativeNamespace and relativeActualNamespace get appended to the
+ // existing namespace that validator is on.
+ // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
+ // on the nesting. most of the time they will be blank, unless you validate
+ // at a level lower than the current field depth
+ ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
+}
+
+var _ StructLevel = new(validate)
+
+// Top returns the top level struct
+//
+// NOTE: this can be the same as the current struct being validated
+// if it is not a nested struct.
+//
+// this is only called when within Struct and Field Level validation and
+// should not be relied upon for an accurate value otherwise.
+func (v *validate) Top() reflect.Value {
+ return v.top
+}
+
+// Parent returns the current struct's parent
+//
+// NOTE: this can be the same as the current struct being validated
+// if it is not a nested struct.
+//
+// this is only called when within Struct and Field Level validation and
+// should not be relied upon for an accurate value otherwise.
+func (v *validate) Parent() reflect.Value {
+ return v.slflParent
+}
+
+// Current returns the current struct.
+func (v *validate) Current() reflect.Value {
+ return v.slCurrent
+}
+
+// Validator returns the main validation object, in case one wants to call validations internally.
+func (v *validate) Validator() *Validate {
+ return v.v
+}
+
+// ExtractType gets the actual underlying type of field value.
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
+ return v.extractTypeInternal(field, false)
+}
+
+// ReportError reports an error just by passing the field and tag information
+func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
+
+ fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
+
+ if len(structFieldName) == 0 {
+ structFieldName = fieldName
+ }
+
+ v.str1 = string(append(v.ns, fieldName...))
+
+ if v.v.hasTagNameFunc || fieldName != structFieldName {
+ v.str2 = string(append(v.actualNs, structFieldName...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ if kind == reflect.Invalid {
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tag,
+ actualTag: tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(fieldName)),
+ structfieldLen: uint8(len(structFieldName)),
+ param: param,
+ kind: kind,
+ },
+ )
+ return
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tag,
+ actualTag: tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(fieldName)),
+ structfieldLen: uint8(len(structFieldName)),
+ value: fv.Interface(),
+ param: param,
+ kind: kind,
+ typ: fv.Type(),
+ },
+ )
+}
+
+// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
+//
+// NOTE: this function prepends the current namespace to the relative ones.
+func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
+
+ var err *fieldError
+
+ for i := 0; i < len(errs); i++ {
+
+ err = errs[i].(*fieldError)
+ err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
+ err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
+
+ v.errs = append(v.errs, err)
+ }
+}
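
The StructLevel interface above is what a registered struct-level validation receives. A minimal sketch of registering one and reporting errors through it; the User type and the fnameorlname tag are illustrative, not part of this diff:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	FirstName string
	LastName  string
}

func main() {
	v := validator.New()

	// Runs after field-level validation for every User passed to Struct().
	v.RegisterStructValidation(func(sl validator.StructLevel) {
		u := sl.Current().Interface().(User)
		if u.FirstName == "" && u.LastName == "" {
			// fieldName/structFieldName are appended to the current namespace.
			sl.ReportError(u.FirstName, "FirstName", "FirstName", "fnameorlname", "")
			sl.ReportError(u.LastName, "LastName", "LastName", "fnameorlname", "")
		}
	}, User{})

	fmt.Println(v.Struct(User{})) // two fnameorlname errors
}
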
diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go
new file mode 100644
index 0000000..4d9d75c
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/translations.go
@@ -0,0 +1,11 @@
+package validator
+
+import ut "github.com/go-playground/universal-translator"
+
+// TranslationFunc is the function type used to register or override
+// custom translations
+type TranslationFunc func(ut ut.Translator, fe FieldError) string
+
+// RegisterTranslationsFunc allows for registering of translations
+// for a 'ut.Translator' for use within the 'TranslationFunc'
+type RegisterTranslationsFunc func(ut ut.Translator) error
diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go
new file mode 100644
index 0000000..56420f4
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/util.go
@@ -0,0 +1,288 @@
+package validator
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// extractTypeInternal gets the actual underlying type of field value.
+// It will dive into pointers, customTypes and return you the
+// underlying value and its kind.
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
+
+BEGIN:
+ switch current.Kind() {
+ case reflect.Ptr:
+
+ nullable = true
+
+ if current.IsNil() {
+ return current, reflect.Ptr, nullable
+ }
+
+ current = current.Elem()
+ goto BEGIN
+
+ case reflect.Interface:
+
+ nullable = true
+
+ if current.IsNil() {
+ return current, reflect.Interface, nullable
+ }
+
+ current = current.Elem()
+ goto BEGIN
+
+ case reflect.Invalid:
+ return current, reflect.Invalid, nullable
+
+ default:
+
+ if v.v.hasCustomFuncs {
+
+ if fn, ok := v.v.customFuncs[current.Type()]; ok {
+ current = reflect.ValueOf(fn(current))
+ goto BEGIN
+ }
+ }
+
+ return current, current.Kind(), nullable
+ }
+}
+
+// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
+// returns the field, field kind and whether it was successful in retrieving the field at all.
+//
+// NOTE: when not successful, found will be false; this can happen when a nested struct is nil and so the field
+// could not be retrieved because it didn't exist.
+func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) {
+
+BEGIN:
+ current, kind, nullable = v.ExtractType(val)
+ if kind == reflect.Invalid {
+ return
+ }
+
+ if namespace == "" {
+ found = true
+ return
+ }
+
+ switch kind {
+
+ case reflect.Ptr, reflect.Interface:
+ return
+
+ case reflect.Struct:
+
+ typ := current.Type()
+ fld := namespace
+ var ns string
+
+ if typ != timeType {
+
+ idx := strings.Index(namespace, namespaceSeparator)
+
+ if idx != -1 {
+ fld = namespace[:idx]
+ ns = namespace[idx+1:]
+ } else {
+ ns = ""
+ }
+
+ bracketIdx := strings.Index(fld, leftBracket)
+ if bracketIdx != -1 {
+ fld = fld[:bracketIdx]
+
+ ns = namespace[bracketIdx:]
+ }
+
+ val = current.FieldByName(fld)
+ namespace = ns
+ goto BEGIN
+ }
+
+ case reflect.Array, reflect.Slice:
+ idx := strings.Index(namespace, leftBracket)
+ idx2 := strings.Index(namespace, rightBracket)
+
+ arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
+
+ if arrIdx >= current.Len() {
+ return
+ }
+
+ startIdx := idx2 + 1
+
+ if startIdx < len(namespace) {
+ if namespace[startIdx:startIdx+1] == namespaceSeparator {
+ startIdx++
+ }
+ }
+
+ val = current.Index(arrIdx)
+ namespace = namespace[startIdx:]
+ goto BEGIN
+
+ case reflect.Map:
+ idx := strings.Index(namespace, leftBracket) + 1
+ idx2 := strings.Index(namespace, rightBracket)
+
+ endIdx := idx2
+
+ if endIdx+1 < len(namespace) {
+ if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
+ endIdx++
+ }
+ }
+
+ key := namespace[idx:idx2]
+
+ switch current.Type().Key().Kind() {
+ case reflect.Int:
+ i, _ := strconv.Atoi(key)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int8:
+ i, _ := strconv.ParseInt(key, 10, 8)
+ val = current.MapIndex(reflect.ValueOf(int8(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int16:
+ i, _ := strconv.ParseInt(key, 10, 16)
+ val = current.MapIndex(reflect.ValueOf(int16(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int32:
+ i, _ := strconv.ParseInt(key, 10, 32)
+ val = current.MapIndex(reflect.ValueOf(int32(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int64:
+ i, _ := strconv.ParseInt(key, 10, 64)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint:
+ i, _ := strconv.ParseUint(key, 10, 0)
+ val = current.MapIndex(reflect.ValueOf(uint(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint8:
+ i, _ := strconv.ParseUint(key, 10, 8)
+ val = current.MapIndex(reflect.ValueOf(uint8(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint16:
+ i, _ := strconv.ParseUint(key, 10, 16)
+ val = current.MapIndex(reflect.ValueOf(uint16(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint32:
+ i, _ := strconv.ParseUint(key, 10, 32)
+ val = current.MapIndex(reflect.ValueOf(uint32(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint64:
+ i, _ := strconv.ParseUint(key, 10, 64)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Float32:
+ f, _ := strconv.ParseFloat(key, 32)
+ val = current.MapIndex(reflect.ValueOf(float32(f)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Float64:
+ f, _ := strconv.ParseFloat(key, 64)
+ val = current.MapIndex(reflect.ValueOf(f))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Bool:
+ b, _ := strconv.ParseBool(key)
+ val = current.MapIndex(reflect.ValueOf(b))
+ namespace = namespace[endIdx+1:]
+
+ // reflect.Type = string
+ default:
+ val = current.MapIndex(reflect.ValueOf(key))
+ namespace = namespace[endIdx+1:]
+ }
+
+ goto BEGIN
+ }
+
+ // if we got here, there was more namespace left but we cannot go any deeper
+ panic("Invalid field namespace")
+}
+
+// asInt returns the parameter as an int64
+// or panics if it can't convert
+func asInt(param string) int64 {
+ i, err := strconv.ParseInt(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asIntFromTimeDuration parses param as time.Duration and returns it as int64
+// or panics on error.
+func asIntFromTimeDuration(param string) int64 {
+ d, err := time.ParseDuration(param)
+ if err != nil {
+ // attempt parsing as an integer, assuming nanosecond precision
+ return asInt(param)
+ }
+ return int64(d)
+}
+
+// asIntFromType calls the proper function to parse param as int64,
+// given a field's Type t.
+func asIntFromType(t reflect.Type, param string) int64 {
+ switch t {
+ case timeDurationType:
+ return asIntFromTimeDuration(param)
+ default:
+ return asInt(param)
+ }
+}
+
+// asUint returns the parameter as a uint64
+// or panics if it can't convert
+func asUint(param string) uint64 {
+
+ i, err := strconv.ParseUint(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asFloat returns the parameter as a float64
+// or panics if it can't convert
+func asFloat(param string) float64 {
+
+ i, err := strconv.ParseFloat(param, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asBool returns the parameter as a bool
+// or panics if it can't convert
+func asBool(param string) bool {
+
+ i, err := strconv.ParseBool(param)
+ panicIf(err)
+
+ return i
+}
+
+func panicIf(err error) {
+ if err != nil {
+ panic(err.Error())
+ }
+}
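
asIntFromTimeDuration and asIntFromType are what let numeric comparison parameters be written as Go duration strings when the field is a time.Duration. A hedged sketch, assuming the standard gt/lte tags defined elsewhere in the package:

package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	// The field type is time.Duration, so "1h30m" is parsed with
	// time.ParseDuration and the comparison is 2h > 1h30m.
	fmt.Println(v.Var(2*time.Hour, "gt=1h30m")) // <nil>

	// A bare integer parameter falls back to asInt (nanoseconds here).
	fmt.Println(v.Var(10*time.Nanosecond, "lte=5")) // ValidationErrors
}
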
diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go
new file mode 100644
index 0000000..9569c0d
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/validator.go
@@ -0,0 +1,477 @@
+package validator
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// per validate construct
+type validate struct {
+ v *Validate
+ top reflect.Value
+ ns []byte
+ actualNs []byte
+ errs ValidationErrors
+ includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
+ ffn FilterFunc
+ slflParent reflect.Value // StructLevel & FieldLevel
+ slCurrent reflect.Value // StructLevel & FieldLevel
+ flField reflect.Value // StructLevel & FieldLevel
+ cf *cField // StructLevel & FieldLevel
+ ct *cTag // StructLevel & FieldLevel
+ misc []byte // misc reusable
+ str1 string // misc reusable
+ str2 string // misc reusable
+ fldIsPointer bool // StructLevel & FieldLevel
+ isPartial bool
+ hasExcludes bool
+}
+
+// parent and current will be the same on the first run of validateStruct
+func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
+
+ cs, ok := v.v.structCache.Get(typ)
+ if !ok {
+ cs = v.v.extractStructCache(current, typ.Name())
+ }
+
+ if len(ns) == 0 && len(cs.name) != 0 {
+
+ ns = append(ns, cs.name...)
+ ns = append(ns, '.')
+
+ structNs = append(structNs, cs.name...)
+ structNs = append(structNs, '.')
+ }
+
+ // ct is nil on top level struct, and structs as fields that have no tag info
+ // so if nil or if not nil and the structonly tag isn't present
+ if ct == nil || ct.typeof != typeStructOnly {
+
+ var f *cField
+
+ for i := 0; i < len(cs.fields); i++ {
+
+ f = cs.fields[i]
+
+ if v.isPartial {
+
+ if v.ffn != nil {
+ // used with StructFiltered
+ if v.ffn(append(structNs, f.name...)) {
+ continue
+ }
+
+ } else {
+ // used with StructPartial & StructExcept
+ _, ok = v.includeExclude[string(append(structNs, f.name...))]
+
+ if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
+ continue
+ }
+ }
+ }
+
+ v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags)
+ }
+ }
+
+ // check if any struct level validations, after all field validations already checked.
+ // first iteration will have no info about nostructlevel tag, and is checked prior to
+ // calling the next iteration of validateStruct called from traverseField.
+ if cs.fn != nil {
+
+ v.slflParent = parent
+ v.slCurrent = current
+ v.ns = ns
+ v.actualNs = structNs
+
+ cs.fn(ctx, v)
+ }
+}
+
+// traverseField validates any field, be it a struct or a single field, ensures its validity and passes it along to be validated via its tag options
+func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
+ var typ reflect.Type
+ var kind reflect.Kind
+
+ current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
+
+ switch kind {
+ case reflect.Ptr, reflect.Interface, reflect.Invalid:
+
+ if ct == nil {
+ return
+ }
+
+ if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
+ return
+ }
+
+ if ct.hasTag {
+ if kind == reflect.Invalid {
+ v.str1 = string(append(ns, cf.altName...))
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ param: ct.param,
+ kind: kind,
+ },
+ )
+ return
+ }
+
+ v.str1 = string(append(ns, cf.altName...))
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+ if !ct.runValidationWhenNil {
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: current.Type(),
+ },
+ )
+ return
+ }
+ }
+
+ case reflect.Struct:
+
+ typ = current.Type()
+
+ if typ != timeType {
+
+ if ct != nil {
+
+ if ct.typeof == typeStructOnly {
+ goto CONTINUE
+ } else if ct.typeof == typeIsDefault {
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !ct.fn(ctx, v) {
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+ return
+ }
+ }
+
+ ct = ct.next
+ }
+
+ if ct != nil && ct.typeof == typeNoStructLevel {
+ return
+ }
+
+ CONTINUE:
+ // if len == 0 then validating using 'Var' or 'VarWithValue'
+ // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
+ // VarWithField - this allows for validating against each field within the struct against a specific value
+ // pretty handy in certain situations
+ if len(cf.name) > 0 {
+ ns = append(append(ns, cf.altName...), '.')
+ structNs = append(append(structNs, cf.name...), '.')
+ }
+
+ v.validateStruct(ctx, parent, current, typ, ns, structNs, ct)
+ return
+ }
+ }
+
+ if !ct.hasTag {
+ return
+ }
+
+ typ = current.Type()
+
+OUTER:
+ for {
+ if ct == nil {
+ return
+ }
+
+ switch ct.typeof {
+
+ case typeOmitEmpty:
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !hasValue(v) {
+ return
+ }
+
+ ct = ct.next
+ continue
+
+ case typeEndKeys:
+ return
+
+ case typeDive:
+
+ ct = ct.next
+
+ // traverse slice or map here
+ // or panic ;)
+ switch kind {
+ case reflect.Slice, reflect.Array:
+
+ var i64 int64
+ reusableCF := &cField{}
+
+ for i := 0; i < current.Len(); i++ {
+
+ i64 = int64(i)
+
+ v.misc = append(v.misc[0:0], cf.name...)
+ v.misc = append(v.misc, '[')
+ v.misc = strconv.AppendInt(v.misc, i64, 10)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.name = string(v.misc)
+
+ if cf.namesEqual {
+ reusableCF.altName = reusableCF.name
+ } else {
+
+ v.misc = append(v.misc[0:0], cf.altName...)
+ v.misc = append(v.misc, '[')
+ v.misc = strconv.AppendInt(v.misc, i64, 10)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.altName = string(v.misc)
+ }
+ v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
+ }
+
+ case reflect.Map:
+
+ var pv string
+ reusableCF := &cField{}
+
+ for _, key := range current.MapKeys() {
+
+ pv = fmt.Sprintf("%v", key.Interface())
+
+ v.misc = append(v.misc[0:0], cf.name...)
+ v.misc = append(v.misc, '[')
+ v.misc = append(v.misc, pv...)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.name = string(v.misc)
+
+ if cf.namesEqual {
+ reusableCF.altName = reusableCF.name
+ } else {
+ v.misc = append(v.misc[0:0], cf.altName...)
+ v.misc = append(v.misc, '[')
+ v.misc = append(v.misc, pv...)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.altName = string(v.misc)
+ }
+
+ if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
+ v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
+ // can be nil when just keys being validated
+ if ct.next != nil {
+ v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
+ }
+ } else {
+ v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
+ }
+ }
+
+ default:
+ // throw error, if not a slice or map then should not have gotten here
+ // bad dive tag
+ panic("dive error! can't dive on a non slice or map")
+ }
+
+ return
+
+ case typeOr:
+
+ v.misc = v.misc[0:0]
+
+ for {
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if ct.fn(ctx, v) {
+
+ // drain rest of the 'or' values, then continue or leave
+ for {
+
+ ct = ct.next
+
+ if ct == nil {
+ return
+ }
+
+ if ct.typeof != typeOr {
+ continue OUTER
+ }
+ }
+ }
+
+ v.misc = append(v.misc, '|')
+ v.misc = append(v.misc, ct.tag...)
+
+ if ct.hasParam {
+ v.misc = append(v.misc, '=')
+ v.misc = append(v.misc, ct.param...)
+ }
+
+ if ct.isBlockEnd || ct.next == nil {
+ // if we get here, no valid 'or' value and no more tags
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ if ct.hasAlias {
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.actualAliasTag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+
+ } else {
+
+ tVal := string(v.misc)[1:]
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tVal,
+ actualTag: tVal,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+ }
+
+ return
+ }
+
+ ct = ct.next
+ }
+
+ default:
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !ct.fn(ctx, v) {
+
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+
+ return
+ }
+ ct = ct.next
+ }
+ }
+
+}
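
The dive and keys/endkeys branches in traverseField above are what drive collection validation. A brief sketch; the Payload type and its tags are illustrative:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Payload struct {
	// "dive" descends into each element; "email" then applies per element.
	Emails []string `validate:"required,dive,email"`

	// "keys ... endkeys" validates map keys; the trailing tag applies to values.
	Attrs map[string]string `validate:"dive,keys,alphanum,endkeys,required"`
}

func main() {
	v := validator.New()

	p := Payload{
		Emails: []string{"ok@example.com", "not-an-email"},
		Attrs:  map[string]string{"color": "", "bad key!": "x"},
	}

	// Errors carry indexed namespaces such as Payload.Emails[1] and
	// Payload.Attrs[bad key!], built by the misc buffer logic above.
	fmt.Println(v.Struct(p))
}
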
diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go
new file mode 100644
index 0000000..8e27707
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go
@@ -0,0 +1,654 @@
+package validator
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ ut "github.com/go-playground/universal-translator"
+)
+
+const (
+ defaultTagName = "validate"
+ utf8HexComma = "0x2C"
+ utf8Pipe = "0x7C"
+ tagSeparator = ","
+ orSeparator = "|"
+ tagKeySeparator = "="
+ structOnlyTag = "structonly"
+ noStructLevelTag = "nostructlevel"
+ omitempty = "omitempty"
+ isdefault = "isdefault"
+ requiredWithoutAllTag = "required_without_all"
+ requiredWithoutTag = "required_without"
+ requiredWithTag = "required_with"
+ requiredWithAllTag = "required_with_all"
+ requiredIfTag = "required_if"
+ requiredUnlessTag = "required_unless"
+ excludedWithoutAllTag = "excluded_without_all"
+ excludedWithoutTag = "excluded_without"
+ excludedWithTag = "excluded_with"
+ excludedWithAllTag = "excluded_with_all"
+ skipValidationTag = "-"
+ diveTag = "dive"
+ keysTag = "keys"
+ endKeysTag = "endkeys"
+ requiredTag = "required"
+ namespaceSeparator = "."
+ leftBracket = "["
+ rightBracket = "]"
+ restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
+ restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+ restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+)
+
+var (
+ timeDurationType = reflect.TypeOf(time.Duration(0))
+ timeType = reflect.TypeOf(time.Time{})
+
+ defaultCField = &cField{namesEqual: true}
+)
+
+// FilterFunc is the type used to filter fields using
+// StructFiltered(...) function.
+// returning true results in the field being filtered/skipped from
+// validation
+type FilterFunc func(ns []byte) bool
+
+// CustomTypeFunc allows for overriding or adding custom field type handler functions
+// field = field value of the type to return a value to be validated
+// example: Valuer from the sql driver, see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
+type CustomTypeFunc func(field reflect.Value) interface{}
+
+// TagNameFunc allows for adding of a custom tag name parser
+type TagNameFunc func(field reflect.StructField) string
+
+type internalValidationFuncWrapper struct {
+ fn FuncCtx
+ runValidatinOnNil bool
+}
+
+// Validate contains the validator settings and cache
+type Validate struct {
+ tagName string
+ pool *sync.Pool
+ hasCustomFuncs bool
+ hasTagNameFunc bool
+ tagNameFunc TagNameFunc
+ structLevelFuncs map[reflect.Type]StructLevelFuncCtx
+ customFuncs map[reflect.Type]CustomTypeFunc
+ aliases map[string]string
+ validations map[string]internalValidationFuncWrapper
+ transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc
+ tagCache *tagCache
+ structCache *structCache
+}
+
+// New returns a new instance of 'validate' with sane defaults.
+func New() *Validate {
+
+ tc := new(tagCache)
+ tc.m.Store(make(map[string]*cTag))
+
+ sc := new(structCache)
+ sc.m.Store(make(map[reflect.Type]*cStruct))
+
+ v := &Validate{
+ tagName: defaultTagName,
+ aliases: make(map[string]string, len(bakedInAliases)),
+ validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)),
+ tagCache: tc,
+ structCache: sc,
+ }
+
+ // must copy alias validators for separate validations to be used in each validator instance
+ for k, val := range bakedInAliases {
+ v.RegisterAlias(k, val)
+ }
+
+ // must copy validators for separate validations to be used in each instance
+ for k, val := range bakedInValidators {
+
+ switch k {
+ // these require the validation to run even if the value is nil; omitempty still overrides this behaviour
+ case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag,
+ excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag:
+ _ = v.registerValidation(k, wrapFunc(val), true, true)
+ default:
+ // no need to error check here, baked in will always be valid
+ _ = v.registerValidation(k, wrapFunc(val), true, false)
+ }
+ }
+
+ v.pool = &sync.Pool{
+ New: func() interface{} {
+ return &validate{
+ v: v,
+ ns: make([]byte, 0, 64),
+ actualNs: make([]byte, 0, 64),
+ misc: make([]byte, 32),
+ }
+ },
+ }
+
+ return v
+}
+
+// SetTagName allows for changing of the default tag name of 'validate'
+func (v *Validate) SetTagName(name string) {
+ v.tagName = name
+}
+
+// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
+// validation information via context.Context.
+func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
+ errs := make(map[string]interface{})
+ for field, rule := range rules {
+ if reflect.ValueOf(rule).Kind() == reflect.Map && reflect.ValueOf(data[field]).Kind() == reflect.Map {
+ err := v.ValidateMapCtx(ctx, data[field].(map[string]interface{}), rule.(map[string]interface{}))
+ if len(err) > 0 {
+ errs[field] = err
+ }
+ } else if reflect.ValueOf(rule).Kind() == reflect.Map {
+ errs[field] = errors.New("The field: '" + field + "' is not a map to dive")
+ } else {
+ err := v.VarCtx(ctx, data[field], rule.(string))
+ if err != nil {
+ errs[field] = err
+ }
+ }
+ }
+ return errs
+}
+
+// ValidateMap validates map data from a map of tags
+func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
+ return v.ValidateMapCtx(context.Background(), data, rules)
+}
+
+// RegisterTagNameFunc registers a function to get alternate names for StructFields.
+//
+// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
+//
+// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+// if name == "-" {
+// return ""
+// }
+// return name
+// })
+func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
+ v.tagNameFunc = fn
+ v.hasTagNameFunc = true
+}
+
+// RegisterValidation adds a validation with the given tag
+//
+// NOTES:
+// - if the key already exists, the previous validation function will be replaced.
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error {
+ return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...)
+}
+
+// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation
+// allowing context.Context validation support.
+func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error {
+ var nilCheckable bool
+ if len(callValidationEvenIfNull) > 0 {
+ nilCheckable = callValidationEvenIfNull[0]
+ }
+ return v.registerValidation(tag, fn, false, nilCheckable)
+}
+
+func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
+ if len(tag) == 0 {
+ return errors.New("Function Key cannot be empty")
+ }
+
+ if fn == nil {
+ return errors.New("Function cannot be empty")
+ }
+
+ _, ok := restrictedTags[tag]
+ if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
+ panic(fmt.Sprintf(restrictedTagErr, tag))
+ }
+ v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
+ return nil
+}
+
+// RegisterAlias registers a mapping of a single validation tag that
+// defines a common or complex set of validation(s) to simplify adding validation
+// to structs.
+//
+// NOTE: this function is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterAlias(alias, tags string) {
+
+ _, ok := restrictedTags[alias]
+
+ if ok || strings.ContainsAny(alias, restrictedTagChars) {
+ panic(fmt.Sprintf(restrictedAliasErr, alias))
+ }
+
+ v.aliases[alias] = tags
+}
+
+// RegisterStructValidation registers a StructLevelFunc against a number of types.
+//
+// NOTE:
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
+ v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
+}
+
+// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
+// of contextual validation information via context.Context.
+//
+// NOTE:
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
+
+ if v.structLevelFuncs == nil {
+ v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
+ }
+
+ for _, t := range types {
+ tv := reflect.ValueOf(t)
+ if tv.Kind() == reflect.Ptr {
+ t = reflect.Indirect(tv).Interface()
+ }
+
+ v.structLevelFuncs[reflect.TypeOf(t)] = fn
+ }
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
+//
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
+
+ if v.customFuncs == nil {
+ v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
+ }
+
+ for _, t := range types {
+ v.customFuncs[reflect.TypeOf(t)] = fn
+ }
+
+ v.hasCustomFuncs = true
+}
+
+// RegisterTranslation registers translations against the provided tag.
+func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
+
+ if v.transTagFunc == nil {
+ v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
+ }
+
+ if err = registerFn(trans); err != nil {
+ return
+ }
+
+ m, ok := v.transTagFunc[trans]
+ if !ok {
+ m = make(map[string]TranslationFunc)
+ v.transTagFunc[trans] = m
+ }
+
+ m[tag] = translationFn
+
+ return
+}
+
+// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) Struct(s interface{}) error {
+ return v.StructCtx(context.Background(), s)
+}
+
+// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified
+// and also allows passing of context.Context for contextual validation information.
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
+
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = false
+ // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
+
+ vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructFiltered validates a struct's exposed fields that pass the FilterFunc check and automatically validates
+// nested structs, unless otherwise specified.
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
+ return v.StructFilteredCtx(context.Background(), s, fn)
+}
+
+// StructFilteredCtx validates a struct's exposed fields that pass the FilterFunc check and automatically validates
+// nested structs, unless otherwise specified and also allows passing of contextual validation information via
+// context.Context
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = fn
+ // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
+
+ vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructPartial validates the fields passed in only, ignoring all others.
+// Fields may be provided in a namespaced fashion relative to the struct provided
+// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructPartial(s interface{}, fields ...string) error {
+ return v.StructPartialCtx(context.Background(), s, fields...)
+}
+
+// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
+// validation information via context.Context
+// Fields may be provided in a namespaced fashion relative to the struct provided
+// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = nil
+ vd.hasExcludes = false
+ vd.includeExclude = make(map[string]struct{})
+
+ typ := val.Type()
+ name := typ.Name()
+
+ for _, k := range fields {
+
+ flds := strings.Split(k, namespaceSeparator)
+ if len(flds) > 0 {
+
+ vd.misc = append(vd.misc[0:0], name...)
+ // Don't append empty name for unnamed structs
+ if len(vd.misc) != 0 {
+ vd.misc = append(vd.misc, '.')
+ }
+
+ for _, s := range flds {
+
+ idx := strings.Index(s, leftBracket)
+
+ if idx != -1 {
+ for idx != -1 {
+ vd.misc = append(vd.misc, s[:idx]...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+
+ idx2 := strings.Index(s, rightBracket)
+ idx2++
+ vd.misc = append(vd.misc, s[idx:idx2]...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ s = s[idx2:]
+ idx = strings.Index(s, leftBracket)
+ }
+ } else {
+
+ vd.misc = append(vd.misc, s...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ }
+
+ vd.misc = append(vd.misc, '.')
+ }
+ }
+ }
+
+ vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructExcept validates all fields except the ones passed in.
+// Fields may be provided in a namespaced fashion relative to the struct provided
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructExcept(s interface{}, fields ...string) error {
+ return v.StructExceptCtx(context.Background(), s, fields...)
+}
+
+// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
+// validation information via context.Context
+// Fields may be provided in a namespaced fashion relative to the struct provided
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = nil
+ vd.hasExcludes = true
+ vd.includeExclude = make(map[string]struct{})
+
+ typ := val.Type()
+ name := typ.Name()
+
+ for _, key := range fields {
+
+ vd.misc = vd.misc[0:0]
+
+ if len(name) > 0 {
+ vd.misc = append(vd.misc, name...)
+ vd.misc = append(vd.misc, '.')
+ }
+
+ vd.misc = append(vd.misc, key...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ }
+
+ vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// Var validates a single variable using tag style validation.
+// eg.
+// var i int
+// validate.Var(i, "gt=1,lt=10")
+//
+// WARNING: a struct can be passed for validation eg. time.Time is a struct or
+// if you have a custom type and have registered a custom type handler, so must
+// allow it; however unforeseen validations will occur if trying to validate a
+// struct that is meant to be passed to 'validate.Struct'
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+// Validating Array, Slice, and Map fields may return more than one error.
+func (v *Validate) Var(field interface{}, tag string) error {
+ return v.VarCtx(context.Background(), field, tag)
+}
+
+// VarCtx validates a single variable using tag style validation and allows passing of contextual
+// validation information via context.Context.
+// eg.
+// var i int
+// validate.Var(i, "gt=1,lt=10")
+//
+// WARNING: a struct can be passed for validation eg. time.Time is a struct or
+// if you have a custom type and have registered a custom type handler, so must
+// allow it; however unforeseen validations will occur if trying to validate a
+// struct that is meant to be passed to 'validate.Struct'
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+// Validating Array, Slice, and Map fields may return more than one error.
+func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
+ if len(tag) == 0 || tag == skipValidationTag {
+ return nil
+ }
+
+ ctag := v.fetchCacheTag(tag)
+ val := reflect.ValueOf(field)
+ vd := v.pool.Get().(*validate)
+ vd.top = val
+ vd.isPartial = false
+ vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+ v.pool.Put(vd)
+ return
+}
+
+// VarWithValue validates a single variable, against another variable/field's value using tag style validation
+// eg.
+// s1 := "abcd"
+// s2 := "abcd"
+// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
+//
+// WARNING: a struct can be passed for validation eg. time.Time is a struct or
+// if you have a custom type and have registered a custom type handler, so must
+// allow it; however unforeseen validations will occur if trying to validate a
+// struct that is meant to be passed to 'validate.Struct'
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+// Validating Array, Slice, and Map fields may return more than one error.
+func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
+ return v.VarWithValueCtx(context.Background(), field, other, tag)
+}
+
+// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
+// allows passing of contextual validation information via context.Context.
+// eg.
+// s1 := "abcd"
+// s2 := "abcd"
+// validate.VarWithValue(s1, s2, "eqcsfield") // returns true
+//
+// WARNING: a struct can be passed for validation eg. time.Time is a struct or
+// if you have a custom type and have registered a custom type handler, so must
+// allow it; however unforeseen validations will occur if trying to validate a
+// struct that is meant to be passed to 'validate.Struct'
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+// Validating Array, Slice, and Map fields may return more than one error.
+func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
+ if len(tag) == 0 || tag == skipValidationTag {
+ return nil
+ }
+ ctag := v.fetchCacheTag(tag)
+ otherVal := reflect.ValueOf(other)
+ vd := v.pool.Get().(*validate)
+ vd.top = otherVal
+ vd.isPartial = false
+ vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+ v.pool.Put(vd)
+ return
+}
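
A short end-to-end sketch of the entry points defined above (Struct, Var, VarWithValue, StructPartial); the Address type is illustrative and the FieldError accessors follow the upstream API:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Address struct {
	City string `validate:"required"`
	Zip  string `validate:"required,numeric"`
}

func main() {
	v := validator.New()

	// Struct: validates all tagged, exported fields.
	err := v.Struct(Address{City: "Pune", Zip: "abc"})
	if errs, ok := err.(validator.ValidationErrors); ok {
		for _, fe := range errs {
			fmt.Println(fe.Namespace(), fe.Tag()) // Address.Zip numeric
		}
	}

	// Var: a single value against a tag expression.
	fmt.Println(v.Var("", "required")) // ValidationErrors

	// VarWithValue: compare one value against another.
	fmt.Println(v.VarWithValue("abcd", "abcd", "eqcsfield")) // <nil>

	// StructPartial: only the named fields are validated.
	fmt.Println(v.StructPartial(Address{Zip: "411001"}, "Zip")) // <nil>
}
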
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 0000000..3d97fc7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people. For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+# Organization's name
+# Individual's name
+# Individual's name
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1b4f6c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov
+Brian Goff
+Clayton Coleman
+Denis Smirnov
+DongYun Kang
+Dwayne Schultz
+Georg Apitz
+Gustav Paul
+Johan Brandhorst
+John Shahid
+John Tuley
+Laurent
+Patrick Lee
+Peter Edge
+Roger Johansson
+Sam Nguyen
+Sergio Arbeo
+Stephen J Day
+Tamir Duberstein
+Todd Eisenberger
+Tormod Erevik Lea
+Vyacheslav Kim
+Walter Schulze
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..f57de90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+Protocol Buffers for Go with Gadgets
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..00d65f3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C test_proto
+ make -C proto3_proto
+ make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..a26b046
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+ emOut := out.Addr().Interface().(extensionsBytes)
+ bIn := emIn.GetExtensions()
+ bOut := emOut.GetExtensions()
+ *bOut = append(*bOut, *bIn...)
+ } else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
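A minimal sketch of the Clone/Merge semantics implemented above. `pb.Example` and its fields are hypothetical stand-ins for any gogo-generated message; only `proto.Clone`, `proto.Merge`, and `proto.String` come from this package.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	pb "example.com/yourapp/gen/pb" // hypothetical generated package
)

func main() {
	src := &pb.Example{Name: proto.String("a"), Tags: []string{"x"}}

	// Clone allocates a new message of the same type and deep-copies every field.
	dst := proto.Clone(src).(*pb.Example)

	// Merge overwrites set scalar fields and appends repeated fields.
	proto.Merge(dst, &pb.Example{Tags: []string{"y"}})

	fmt.Println(dst.GetName(), dst.GetTags()) // a [x y]
}
```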
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
new file mode 100644
index 0000000..2455248
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
@@ -0,0 +1,39 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "reflect"
+
+type custom interface {
+ Marshal() ([]byte, error)
+ Unmarshal(data []byte) error
+ Size() int
+}
+
+var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check if we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
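The exported varint helpers above can be exercised without any generated messages; a small round-trip sketch:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Package-level helper: decodes from a raw slice and reports bytes consumed.
	v, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2

	// Buffer round trip: EncodeVarint appends to the buffer, DecodeVarint reads from the front.
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)
	x, err := b.DecodeVarint()
	fmt.Println(x, err) // 300 <nil>
}
```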
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
new file mode 100644
index 0000000..fe1bd7d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ discardInfo := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ discardInfo.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ discardInfo := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ discardInfo.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
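A sketch of when DiscardUnknown is useful: re-marshaling a message without carrying forward unrecognized tags. `pb.Example` is a hypothetical generated type; only the proto calls come from this package.

```go
package protoutil

import (
	"github.com/gogo/protobuf/proto"

	pb "example.com/yourapp/gen/pb" // hypothetical generated package
)

// ReencodeWithoutUnknowns decodes raw bytes, drops any unknown fields that
// were preserved in XXX_unrecognized, and re-encodes the message.
func ReencodeWithoutUnknowns(raw []byte) ([]byte, error) {
	msg := &pb.Example{}
	if err := proto.Unmarshal(raw, msg); err != nil {
		return nil, err
	}
	proto.DiscardUnknown(msg)
	return proto.Marshal(msg)
}
```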
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
new file mode 100644
index 0000000..93464c9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration.go
@@ -0,0 +1,100 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+const (
+ // Range of a Duration in seconds, as specified in
+ // google/protobuf/duration.proto. This is about 10,000 years in seconds.
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+ if d == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %#v: seconds out of range", d)
+ }
+ if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %#v: nanos out of range", d)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+ return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+ }
+ return nil
+}
+
+// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+ if err := validateDuration(p); err != nil {
+ return 0, err
+ }
+ d := time.Duration(p.Seconds) * time.Second
+ if int64(d/time.Second) != p.Seconds {
+ return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+ }
+ if p.Nanos != 0 {
+ d += time.Duration(p.Nanos)
+ if (d < 0) != (p.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &duration{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
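durationProto and durationFromProto are unexported, but the split they perform (whole seconds plus a same-sign nanosecond remainder, as required by validateDuration) is easy to reproduce standalone:

```go
package main

import (
	"fmt"
	"time"
)

// split mirrors the arithmetic in durationProto: whole seconds plus a
// nanosecond remainder that carries the same sign as the seconds.
func split(d time.Duration) (secs int64, nanos int32) {
	n := d.Nanoseconds()
	secs = n / 1e9
	nanos = int32(n - secs*1e9)
	return secs, nanos
}

func main() {
	fmt.Println(split(90 * time.Second))         // 90 0
	fmt.Println(split(-1500 * time.Millisecond)) // -1 -500000000
}
```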
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 0000000..e748e17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+ "time"
+)
+
+var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
+
+type duration struct {
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *duration) Reset() { *m = duration{} }
+func (*duration) ProtoMessage() {}
+func (*duration) String() string { return "duration" }
+
+func init() {
+ RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..9581ccd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,205 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ sizVar := SizeVarint(uint64(siz))
+ p.grow(siz + sizVar)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
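A quick sketch of the varint and zigzag encodings implemented above, using only exported API from this package:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	fmt.Println(proto.EncodeVarint(300)) // [172 2]
	fmt.Println(proto.SizeVarint(300))   // 2

	// Zigzag maps signed values to unsigned ones so small negatives stay
	// small on the wire: 0->0, -1->1, 1->2, -2->3, ...
	neg := int64(-2)
	b := proto.NewBuffer(nil)
	_ = b.EncodeZigzag64(uint64(neg)) // wire value 3
	x, _ := b.DecodeZigzag64()
	fmt.Println(int64(x)) // -2
}
```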
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..0f5fb17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,33 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+ return &RequiredNotSetError{field}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..341c6f5
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,605 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ case extensionsBytes:
+ return slowExtensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ if ebase, ok := base.(extensionsBytes); ok {
+ clearExtension(base, id)
+ ext := ebase.GetExtensions()
+ *ext = append(*ext, b...)
+ return
+ }
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if ea, ok := pbi.(slowExtensionAdapter); ok {
+ pbi = ea.extensionsBytes
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ buf := *ext
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint(buf[o:])
+ fieldNum := int32(tag >> 3)
+ if int32(fieldNum) == extension.Field {
+ return true
+ }
+ wireType := int(tag & 0x7)
+ o += n
+ l, err := size(buf[o:], wireType)
+ if err != nil {
+ return false
+ }
+ o += l
+ }
+ return false
+ }
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+ if epb, ok := pb.(extensionsBytes); ok {
+ offset := 0
+ for offset != -1 {
+ offset = deleteExtension(epb, fieldNum, offset)
+ }
+ return
+ }
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, fieldNum)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ return decodeExtensionFromBytes(extension, *ext)
+ }
+
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if cerr := checkExtensionTypes(epb, extension); cerr != nil {
+ return nil, cerr
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
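+
+// Illustrative sketch of HasExtension/GetExtension, assuming a hypothetical
+// generated extension descriptor example.E_Extra whose ExtensionType is
+// *example.Extra; these names are placeholders, not real generated code.
+//
+//	if proto.HasExtension(msg, example.E_Extra) {
+//		v, err := proto.GetExtension(msg, example.E_Extra)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		extra := v.(*example.Extra) // concrete type matches E_Extra.ExtensionType
+//		_ = extra
+//	}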
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+		// We do not need to return a pointer; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ if epb, ok := pb.(extensionsBytes); ok {
+ ClearExtension(pb, extension)
+ newb, err := encodeExtension(extension, value)
+ if err != nil {
+ return err
+ }
+ bb := epb.GetExtensions()
+ *bb = append(*bb, newb...)
+ return nil
+ }
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
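+
+// Illustrative sketch of SetExtension with the same hypothetical descriptor
+// example.E_Extra as above; the value's type must exactly match
+// E_Extra.ExtensionType or an error is returned.
+//
+//	if err := proto.SetExtension(msg, example.E_Extra, &example.Extra{Name: proto.String("x")}); err != nil {
+//		log.Fatal(err)
+//	}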
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ if epb, doki := pb.(extensionsBytes); doki {
+ ext := epb.GetExtensions()
+ *ext = []byte{}
+ return
+ }
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..6f1ae12
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,389 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+)
+
+type extensionsBytes interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ GetExtensions() *[]byte
+}
+
+type slowExtensionAdapter struct {
+ extensionsBytes
+}
+
+func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
+ panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
+}
+
+func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ b := s.GetExtensions()
+ m, err := BytesToExtensionsMap(*b)
+ if err != nil {
+ panic(err)
+ }
+ return m, notLocker{}
+}
+
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+ if reflect.ValueOf(pb).IsNil() {
+ return ifnotset
+ }
+ value, err := GetExtension(pb, extension)
+ if err != nil {
+ return ifnotset
+ }
+ if value == nil {
+ return ifnotset
+ }
+ if value.(*bool) == nil {
+ return ifnotset
+ }
+ return *(value.(*bool))
+}
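+
+// Illustrative sketch of GetBoolExtension, assuming a hypothetical bool-valued
+// extension descriptor example.E_Enabled; the fallback argument is returned
+// when the message is nil, the extension is unset, or lookup fails.
+//
+//	enabled := proto.GetBoolExtension(opts, example.E_Enabled, false)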
+
+func (this *Extension) Equal(that *Extension) bool {
+ if err := this.Encode(); err != nil {
+ return false
+ }
+ if err := that.Encode(); err != nil {
+ return false
+ }
+ return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+ if err := this.Encode(); err != nil {
+ return 1
+ }
+ if err := that.Encode(); err != nil {
+ return -1
+ }
+ return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+ info := getMarshalInfo(reflect.TypeOf(m))
+ return info.sizeV1Extensions(m.extensionsWrite())
+}
+
+type sortableMapElem struct {
+ field int32
+ ext Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+ s := make(sortableExtensions, 0, len(m))
+ for k, v := range m {
+ s = append(s, &sortableMapElem{field: k, ext: v})
+ }
+ return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+ sort.Sort(this)
+ ss := make([]string, len(this))
+ for i := range this {
+ ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+ }
+ return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+ return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+ return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+ m, err := BytesToExtensionsMap(ext)
+ if err != nil {
+ panic(err)
+ }
+ return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+ return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+ return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+ o := 0
+ for _, e := range m {
+ if err := e.Encode(); err != nil {
+ return 0, err
+ }
+ n := copy(data[o:], e.enc)
+ if n != len(e.enc) {
+ return 0, io.ErrShortBuffer
+ }
+ o += n
+ }
+ return o, nil
+}
+
+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+ o := 0
+ end := len(data)
+ for _, e := range m {
+ if err := e.Encode(); err != nil {
+ return 0, err
+ }
+ n := copy(data[end-len(e.enc):], e.enc)
+ if n != len(e.enc) {
+ return 0, io.ErrShortBuffer
+ }
+ end -= n
+ o += n
+ }
+ return o, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+ e := m[id]
+ if err := e.Encode(); err != nil {
+ return nil, err
+ }
+ return e.enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+ switch wire {
+ case WireVarint:
+ _, n := DecodeVarint(buf)
+ return n, nil
+ case WireFixed64:
+ return 8, nil
+ case WireBytes:
+ v, n := DecodeVarint(buf)
+ return int(v) + n, nil
+ case WireFixed32:
+ return 4, nil
+ case WireStartGroup:
+ offset := 0
+ for {
+ u, n := DecodeVarint(buf[offset:])
+ fwire := int(u & 0x7)
+ offset += n
+ if fwire == WireEndGroup {
+ return offset, nil
+ }
+			// size the field whose tag was just read, using its own wire type
+			s, err := size(buf[offset:], fwire)
+ if err != nil {
+ return 0, err
+ }
+ offset += s
+ }
+ }
+ return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+ m := make(map[int32]Extension)
+ i := 0
+ for i < len(buf) {
+ tag, n := DecodeVarint(buf[i:])
+ if n <= 0 {
+ return nil, fmt.Errorf("unable to decode varint")
+ }
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ l, err := size(buf[i+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ end := i + int(l) + n
+ m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+ i = end
+ }
+ return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+ ee := Extension{enc: make([]byte, len(e))}
+ copy(ee.enc, e)
+ return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+ if ee, eok := e.(extensionsBytes); eok {
+ ext := ee.GetExtensions()
+ *ext = append(*ext, buf...)
+ return
+ }
+ if ee, eok := e.(extendableProto); eok {
+ m := ee.extensionsWrite()
+ ext := m[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, buf...)
+ m[int32(tag)] = ext
+ }
+}
+
+func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
+ u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
+ ei := u.getExtElemInfo(extension)
+ v := value
+ p := toAddrPointer(&v, ei.isptr)
+ siz := ei.sizer(p, SizeVarint(ei.wiretag))
+ buf := make([]byte, 0, siz)
+ return ei.marshaler(buf, p, ei.wiretag, false)
+}
+
+func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
+ o := 0
+ for o < len(buf) {
+ tag, n := DecodeVarint((buf)[o:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ if o+n > len(buf) {
+ return nil, fmt.Errorf("unable to decode extension")
+ }
+ l, err := size((buf)[o+n:], wireType)
+ if err != nil {
+ return nil, err
+ }
+ if int32(fieldNum) == extension.Field {
+ if o+n+l > len(buf) {
+ return nil, fmt.Errorf("unable to decode extension")
+ }
+ v, err := decodeExtension((buf)[o:o+n+l], extension)
+ if err != nil {
+ return nil, err
+ }
+ return v, nil
+ }
+ o += n + l
+ }
+ return defaultExtensionValue(extension)
+}
+
+func (this *Extension) Encode() error {
+ if this.enc == nil {
+ var err error
+ this.enc, err = encodeExtension(this.desc, this.value)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (this Extension) GoString() string {
+ if err := this.Encode(); err != nil {
+ return fmt.Sprintf("error encoding extension: %v", err)
+ }
+ return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return SetExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+ typ := reflect.TypeOf(pb).Elem()
+ ext, ok := extensionMaps[typ]
+ if !ok {
+ return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+ }
+ desc, ok := ext[fieldNum]
+ if !ok {
+ return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+ }
+ return GetExtension(pb, desc)
+}
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+ x := &XXX_InternalExtensions{
+ p: new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }),
+ }
+ x.p.extensionMap = m
+ return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+ pb := extendable.(extendableProto)
+ return pb.extensionsWrite()
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+ ext := pb.GetExtensions()
+ for offset < len(*ext) {
+ tag, n1 := DecodeVarint((*ext)[offset:])
+ fieldNum := int32(tag >> 3)
+ wireType := int(tag & 0x7)
+ n2, err := size((*ext)[offset+n1:], wireType)
+ if err != nil {
+ panic(err)
+ }
+ newOffset := offset + n1 + n2
+ if fieldNum == theFieldNum {
+ *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+ return offset
+ }
+ offset = newOffset
+ }
+ return -1
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..80db1c1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,973 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/gogo/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/gogo/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the merge was successful,
+// that is, whether err was nil or non-fatal. It returns false for fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
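+
+// Illustrative sketch of a reusable Buffer with deterministic output enabled,
+// again assuming the generated pb.Test type from the package documentation.
+// Buffer.Marshal is defined elsewhere in this package.
+//
+//	var buf proto.Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(&pb.Test{Label: proto.String("hello")}); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := buf.Bytes()
+//	_ = data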
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
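+
+// Illustrative sketch of UnmarshalJSONEnum, using the FOO_value map from the
+// generated code shown in the package documentation above.
+//
+//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "example.FOO")
+//	// v == 17 and err == nil; the numeric form []byte("17") yields the same value.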
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ sindex := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = sindex
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
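+
+// Illustrative sketch of SetDefaults, assuming the generated pb.Test type from
+// the package documentation, whose "type" field declares a default of 77.
+//
+//	t := &pb.Test{Label: proto.String("hello")}
+//	proto.SetDefaults(t)
+//	// *t.Type is now 77; fields that were already set are left untouched.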
+
+// v is a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or T or []*T or []T
+ switch f.Kind() {
+ case reflect.Struct:
+ setDefaults(f, recur, zeros)
+
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.Kind() == reflect.Ptr && e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage, which
+	// records its scalar fields (with any proto-declared default values) and
+	// the indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Struct:
+ nestedMessage = true // non-nullable
+
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr, reflect.Struct:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+const (
+	// GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true
+
+	// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..b3aa391
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+type Sizer interface {
+ Size() int
+}
+
+type ProtoSizer interface {
+ ProtoSize() int
+}
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+ s, ok := m[value]
+ if !ok {
+ s = strconv.Itoa(int(value))
+ }
+ return json.Marshal(s)
+}
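+
+// exampleMarshalJSONEnum is an editor-added illustrative sketch, not part of
+// the upstream gogo/protobuf source; the enum name map is hypothetical.
+// MarshalJSONEnum emits the enum's registered name when the value is known
+// and falls back to the decimal string otherwise.
+func exampleMarshalJSONEnum() {
+	names := map[int32]string{0: "UNKNOWN", 1: "RUNNING"}
+	b1, _ := MarshalJSONEnum(names, 1) // `"RUNNING"`
+	b2, _ := MarshalJSONEnum(names, 7) // `"7"`, since 7 has no registered name
+	_, _ = b1, b2
+}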
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
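+
+// exampleMessageSetTag is an editor-added illustrative sketch, not part of the
+// upstream gogo/protobuf source; the type ID is hypothetical. It shows the tag
+// prefix rebuilt by unmarshalMessageSet: the standard protobuf key
+// (field number << 3) | wire type, varint-encoded. For type ID 100 the key is
+// 100<<3|2 = 802, which encodes as the bytes 0xa2 0x06.
+func exampleMessageSetTag() []byte {
+	const typeID = 100
+	return EncodeVarint(uint64(typeID)<<3 | WireBytes) // []byte{0xa2, 0x06}
+}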
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
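+
+// Editor's note (illustrative, not part of the upstream source): because this
+// file carries the build constraint "purego appengine js" while
+// pointer_unsafe.go carries its negation, exactly one of the two pointer
+// implementations is compiled in. For example, `go build -tags purego` or a
+// GOOS=js build selects this reflect-based version.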
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
new file mode 100644
index 0000000..7ffd3c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
@@ -0,0 +1,59 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+)
+
+// TODO: untested, so probably incorrect.
+
+func (p pointer) getRef() pointer {
+ return pointer{v: p.v.Addr()}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+ slice := p.getSlice(typ)
+ elem := v.asPointerTo(typ).Elem()
+ newSlice := reflect.Append(slice, elem)
+ slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+ sliceTyp := reflect.SliceOf(typ)
+ slice := p.asPointerTo(sliceTyp)
+ slice = slice.Elem()
+ return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid(); however, calling panic
+	// causes this method to no longer be inlineable, which is a serious
+	// performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..aca8eed
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,56 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func (p pointer) getRef() pointer {
+ return pointer{p: (unsafe.Pointer)(&p.p)}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+ slice := p.getSlice(typ)
+ elem := v.asPointerTo(typ).Elem()
+ newSlice := reflect.Append(slice, elem)
+ slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+ sliceTyp := reflect.SliceOf(typ)
+ slice := p.asPointerTo(sliceTyp)
+ slice = slice.Elem()
+ return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..28da147
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,610 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for describing the properties (struct tags and wire metadata)
+ * of protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
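+
+// exampleTagMap is an editor-added illustrative sketch, not part of the
+// upstream gogo/protobuf source. Tags below tagMapFastLimit are stored in the
+// fastTags slice for O(1) lookup; larger tags fall back to the slowTags map.
+func exampleTagMap() {
+	var m tagMap
+	m.put(3, 0)        // stored in m.fastTags[3]
+	m.put(5000, 1)     // 5000 >= tagMapFastLimit, stored in m.slowTags[5000]
+	fi, ok := m.get(3) // fi == 0, ok == true
+	_, _ = fi, ok
+}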
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ CustomType string
+ CastType string
+ StdTime bool
+ StdDuration bool
+ WktPointer bool
+
+ stype reflect.Type // set for struct types only
+ ctype reflect.Type // set for custom types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ log.Printf("proto: tag has too few fields: %q", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ log.Printf("proto: tag has unknown wire type: %q", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ case strings.HasPrefix(f, "embedded="):
+ p.OrigName = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "customtype="):
+ p.CustomType = strings.Split(f, "=")[1]
+ case strings.HasPrefix(f, "casttype="):
+ p.CastType = strings.Split(f, "=")[1]
+ case f == "stdtime":
+ p.StdTime = true
+ case f == "stdduration":
+ p.StdDuration = true
+ case f == "wktptr":
+ p.WktPointer = true
+ }
+ }
+}
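+
+// exampleParse is an editor-added illustrative sketch, not part of the
+// upstream gogo/protobuf source; the tag string is hypothetical. Parse fills
+// in the wire type, tag number, modifiers, and the trailing default, which may
+// itself contain commas because def= is always the last component.
+func exampleParse() Properties {
+	var p Properties
+	p.Parse("bytes,49,opt,name=foo,def=hello, world!")
+	// Afterwards: p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
+	// p.Optional == true, p.OrigName == "foo",
+	// p.HasDefault == true, p.Default == "hello, world!".
+	return p
+}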
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ isMap := typ.Kind() == reflect.Map
+ if len(p.CustomType) > 0 && !isMap {
+ p.ctype = typ
+ p.setTag(lockGetProp)
+ return
+ }
+ if p.StdTime && !isMap {
+ p.setTag(lockGetProp)
+ return
+ }
+ if p.StdDuration && !isMap {
+ p.setTag(lockGetProp)
+ return
+ }
+ if p.WktPointer && !isMap {
+ p.setTag(lockGetProp)
+ return
+ }
+ switch t1 := typ; t1.Kind() {
+ case reflect.Struct:
+ p.stype = typ
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ case reflect.Struct:
+ p.stype = t3
+ }
+ case reflect.Struct:
+ p.stype = t2
+ }
+
+ case reflect.Map:
+
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+
+ p.MapValProp.CustomType = p.CustomType
+ p.MapValProp.StdDuration = p.StdDuration
+ p.MapValProp.StdTime = p.StdTime
+ p.MapValProp.WktPointer = p.WktPointer
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+ p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ return prop
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ isOneofMessage := false
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ isOneofMessage = true
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ if isOneofMessage {
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
+ }
+ if len(oots) > 0 {
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+ if _, ok := enumStringMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
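+
+// exampleEnumRegistry is an editor-added illustrative sketch, not part of the
+// upstream gogo/protobuf source; the enum name and values are hypothetical.
+// Generated code registers the value->name and name->value maps once per enum
+// type; EnumValueMap then resolves names when parsing the text format.
+func exampleEnumRegistry() {
+	RegisterEnum("example.Status",
+		map[int32]string{0: "UNKNOWN", 1: "RUNNING"},
+		map[string]int32{"UNKNOWN": 0, "RUNNING": 1})
+	v := EnumValueMap("example.Status")["RUNNING"] // v == 1
+	_ = v
+}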
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
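+
+// Editor's note (illustrative, not part of the upstream source): a generated
+// .pb.go file registers each message with a typed nil pointer, e.g.
+// RegisterType((*Example)(nil), "example.Example") for a hypothetical Example
+// message. MessageName then maps any *Example back to "example.Example", and
+// MessageType("example.Example") returns reflect.TypeOf((*Example)(nil)).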
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..40ea3dd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,36 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+)
+
+var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
+var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..5a5fd93
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,119 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "io"
+)
+
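+// Skip scans the first field in the given wire-format data, starting at its tag,
+// and returns the index just past that field, so unknown fields can be stepped over.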
+func Skip(data []byte) (n int, err error) {
+ l := len(data)
+ index := 0
+ for index < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
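+		// wire types: 0 varint, 1 fixed64, 2 length-delimited, 3 start group, 4 end group, 5 fixed32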
+ switch wireType {
+ case 0:
+ for {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ index++
+ if data[index-1] < 0x80 {
+ break
+ }
+ }
+ return index, nil
+ case 1:
+ index += 8
+ return index, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ index += length
+ return index, nil
+ case 3:
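+			// start group: skip nested fields recursively until the matching end-group tag (wire type 4).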
+ for {
+ var innerWire uint64
+ var start int = index
+ for shift := uint(0); ; shift += 7 {
+ if index >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := data[index]
+ index++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := Skip(data[start:])
+ if err != nil {
+ return 0, err
+ }
+ index = start + next
+ }
+ return index, nil
+ case 4:
+ return index, nil
+ case 5:
+ index += 4
+ return index, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..f8babde
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -0,0 +1,3009 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and the error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+
+ hassizer bool // has custom sizer
+ hasprotosizer bool // has custom protosizer
+
+ bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+
+ uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ // Uses the message's Size method if available
+ if u.hassizer {
+ s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+ return s.Size()
+ }
+ // Uses the message's ProtoSize method if available
+ if u.hasprotosizer {
+ s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+ return s.ProtoSize()
+ }
+
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.bytesExtensions.IsValid() {
+ s := *ptr.offset(u.bytesExtensions).toBytes()
+ n += len(s)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and the error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.bytesExtensions.IsValid() {
+ s := *ptr.offset(u.bytesExtensions).toBytes()
+ b = append(b, s...)
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.bytesExtensions = invalidField
+ u.sizecache = invalidField
+ isOneofMessage := false
+
+ if reflect.PtrTo(t).Implements(sizerType) {
+ u.hassizer = true
+ }
+ if reflect.PtrTo(t).Implements(protosizerType) {
+ u.hasprotosizer = true
+ }
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ n := t.NumField()
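+	// The loop below reduces n to the number of non-XXX fields, used for batch allocation.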
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ isOneofMessage = true
+ }
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ if f.Type.Kind() == reflect.Map {
+ u.v1extensions = toField(&f)
+ } else {
+ u.bytesExtensions = toField(&f)
+ }
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
+ if isOneofMessage {
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizr, marshalr := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizr,
+ marshaler: marshalr,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // tag has format of "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizr,
+ marshaler: marshalr,
+ }
+ }
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ ctype := false
+ isTime := false
+ isDuration := false
+ isWktPointer := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ if strings.HasPrefix(tags[i], "customtype=") {
+ ctype = true
+ }
+ if tags[i] == "stdtime" {
+ isTime = true
+ }
+ if tags[i] == "stdduration" {
+ isDuration = true
+ }
+ if tags[i] == "wktptr" {
+ isWktPointer = true
+ }
+ }
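+	// Only proto3 string fields are validated as UTF-8 while marshaling.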
+ validateUTF8 = validateUTF8 && proto3
+ if !proto3 && !pointer && !slice {
+ nozero = false
+ }
+
+ if ctype {
+ if reflect.PtrTo(t).Implements(customType) {
+ if slice {
+ return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+ }
+ if pointer {
+ return makeCustomPtrMarshaler(getMarshalInfo(t))
+ }
+ return makeCustomMarshaler(getMarshalInfo(t))
+ } else {
+ panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+ }
+ }
+
+ if isTime {
+ if pointer {
+ if slice {
+ return makeTimePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeTimePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeTimeSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeTimeMarshaler(getMarshalInfo(t))
+ }
+
+ if isDuration {
+ if pointer {
+ if slice {
+ return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeDurationPtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeDurationSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeDurationMarshaler(getMarshalInfo(t))
+ }
+
+ if isWktPointer {
+ switch t.Kind() {
+ case reflect.Float64:
+ if pointer {
+ if slice {
+ return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdDoubleValueMarshaler(getMarshalInfo(t))
+ case reflect.Float32:
+ if pointer {
+ if slice {
+ return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdFloatValueMarshaler(getMarshalInfo(t))
+ case reflect.Int64:
+ if pointer {
+ if slice {
+ return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdInt64ValueMarshaler(getMarshalInfo(t))
+ case reflect.Uint64:
+ if pointer {
+ if slice {
+ return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
+ case reflect.Int32:
+ if pointer {
+ if slice {
+ return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdInt32ValueMarshaler(getMarshalInfo(t))
+ case reflect.Uint32:
+ if pointer {
+ if slice {
+ return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
+ case reflect.Bool:
+ if pointer {
+ if slice {
+ return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdBoolValueMarshaler(getMarshalInfo(t))
+ case reflect.String:
+ if pointer {
+ if slice {
+ return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdStringValueMarshaler(getMarshalInfo(t))
+ case uint8SliceType:
+ if pointer {
+ if slice {
+ return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
+ }
+ if slice {
+ return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeStdBytesValueMarshaler(getMarshalInfo(t))
+ default:
+ panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+ }
+ }
+
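+	// Remaining kinds: pick the sizer/marshaler from the field's kind, wire encoding,
+	// and cardinality (value, pointer, slice, packed).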
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // Oneof bytes field may also have "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if pointer {
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ } else {
+ if slice {
+ return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageRefMarshaler(getMarshalInfo(t))
+ }
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
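+// The zigzag helpers below encode signed values so that small magnitudes, positive or
+// negative, become short varints: (v<<1)^(v>>31) for 32-bit and (v<<1)^(v>>63) for 64-bit.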
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
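+// Fixed-width values are written little-endian, least significant byte first.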
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
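+// Each output byte carries 7 bits of the value; the high bit marks that more bytes follow.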
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
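+	// Packed repeated fields are emitted as one length-delimited record: the tag with its
+	// wire type forced to WireBytes, the total byte length, then the raw values back to back.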
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
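+
+// Note: groups are delimited by matching start-group and end-group tags rather
+// than by a length prefix, which is why the sizer above adds 2*tagsize and the
+// marshaler writes the tag both before and after the nested message.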
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
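+
+// Note: embedded messages are length-delimited on the wire: tag, varint length,
+// then payload, hence the SizeVarint(uint64(siz)) term in the sizer above.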
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ stdOptions := false
+ for _, t := range tags {
+ if strings.HasPrefix(t, "customtype=") {
+ valTags = append(valTags, t)
+ }
+ if t == "stdtime" {
+ valTags = append(valTags, t)
+ stdOptions = true
+ }
+ if t == "stdduration" {
+ valTags = append(valTags, t)
+ stdOptions = true
+ }
+ if t == "wktptr" {
+ valTags = append(valTags, t)
+ }
+ }
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is a pointer to the
+	// value.
+	// The key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, we don't have a size cache,
+	// but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
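+
+// Note: each map entry is encoded like a small nested message with the key as
+// field 1 and the value as field 2 (keyWireTag/valWireTag above), wrapped in
+// the map field's own tag and a varint length.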
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+	// Sorting may not be strictly required, but the old code did it, so keep the behavior.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
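+//
+// Each extension is therefore emitted as one Item group: a start-group tag for
+// field 1, the type_id as a varint (field 2), the message as length-delimited
+// bytes (field 3), and a matching end-group tag, as implemented by
+// sizeMessageSet and appendMessageSet below.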
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+	// In case the wrapper was somehow not generated.
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+	// In case the wrapper was somehow not generated.
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
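+
+// A minimal usage sketch (illustrative only; pb.Example stands in for any
+// generated message type and is not defined in this package):
+//
+//	msg := &pb.Example{}
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = data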
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if p.deterministic {
+ if _, ok := pb.(Marshaler); ok {
+ return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
+ }
+ }
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
+ pp, err = m.XXX_Marshal(pp, p.deterministic)
+ p.buf = append(p.buf, pp...)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ var b []byte
+ b, err = m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+	// In case the wrapper was somehow not generated.
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
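+
+// Note: the three-index slice above hands XXX_Marshal a zero-length slice whose
+// capacity aliases the spare room reserved by grow, so the encoded bytes are
+// normally written directly into p.buf's backing array and the final append
+// only extends the buffer's length.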
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
new file mode 100644
index 0000000..997f57c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
@@ -0,0 +1,388 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+ "time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
+// It marshals a message T instead of a *T
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ siz := u.size(ptr)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(ptr)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, ptr, deterministic)
+ }
+}
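+
+// Note: because the field here is a message value rather than a pointer, there
+// is no nil check, unlike makeMessageMarshaler above.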
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
+// It marshals a slice of messages []T instead of []*T
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ e := elem.Interface()
+ v := toAddrPointer(&e, false)
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ var err, errreq error
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ e := elem.Interface()
+ v := toAddrPointer(&e, false)
+ b = appendVarint(b, wiretag)
+ siz := u.size(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+
+ return b, errreq
+ }
+}
+
+func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+ siz := m.Size()
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+ siz := m.Size()
+ buf, err := m.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(u.typ).Interface().(custom)
+ siz := m.Size()
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(u.typ).Interface().(custom)
+ siz := m.Size()
+ buf, err := m.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return 0
+ }
+ siz := Size(ts)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return nil, err
+ }
+ buf, err := Marshal(ts)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
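+
+// Note: the stdtime marshalers convert the time.Time value to its Timestamp
+// proto form via timestampProto and embed it as a length-delimited message;
+// the stdduration marshalers below do the same using durationProto.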
+
+func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return 0
+ }
+ siz := Size(ts)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return nil, err
+ }
+ buf, err := Marshal(ts)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(time.Time)
+ ts, err := timestampProto(t)
+ if err != nil {
+ return 0
+ }
+ siz := Size(ts)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(time.Time)
+ ts, err := timestampProto(t)
+ if err != nil {
+ return nil, err
+ }
+ siz := Size(ts)
+ buf, err := Marshal(ts)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return 0
+ }
+ siz := Size(ts)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*time.Time)
+ ts, err := timestampProto(*t)
+ if err != nil {
+ return nil, err
+ }
+ siz := Size(ts)
+ buf, err := Marshal(ts)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+ dur := durationProto(*d)
+ siz := Size(dur)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+ dur := durationProto(*d)
+ buf, err := Marshal(dur)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+ dur := durationProto(*d)
+ siz := Size(dur)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+ dur := durationProto(*d)
+ buf, err := Marshal(dur)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ d := elem.Interface().(time.Duration)
+ dur := durationProto(d)
+ siz := Size(dur)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ d := elem.Interface().(time.Duration)
+ dur := durationProto(d)
+ siz := Size(dur)
+ buf, err := Marshal(dur)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ d := elem.Interface().(*time.Duration)
+ dur := durationProto(*d)
+ siz := Size(dur)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ d := elem.Interface().(*time.Duration)
+ dur := durationProto(*d)
+ siz := Size(dur)
+ buf, err := Marshal(dur)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..60dcf70
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -0,0 +1,676 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case isSlice && !isPointer: // E.g. []pb.T
+ mergeInfo := getMergeInfo(tf)
+ zero := reflect.Zero(tf)
+ mfi.merge = func(dst, src pointer) {
+ // TODO: Make this faster?
+ dstsp := dst.asPointerTo(f.Type)
+ dsts := dstsp.Elem()
+ srcs := src.asPointerTo(f.Type).Elem()
+ for i := 0; i < srcs.Len(); i++ {
+ dsts = reflect.Append(dsts, zero)
+ srcElement := srcs.Index(i).Addr()
+ dstElement := dsts.Index(dsts.Len() - 1).Addr()
+ mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
+ }
+ if dsts.IsNil() {
+ dsts = reflect.MakeSlice(f.Type, 0, 0)
+ }
+ dstsp.Elem().Set(dsts)
+ }
+ case !isPointer:
+ mergeInfo := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ mergeInfo.merge(dst, src)
+ }
+ case isSlice: // E.g., []*pb.T
+ mergeInfo := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mergeInfo.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mergeInfo := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mergeInfo.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+			// Must be a oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..9372293
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2249 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
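
The wrapper above is just a lazy cache: the first call pays for the reflection-driven getUnmarshalInfo, and later calls reuse the stored result through an atomic load. A minimal standalone sketch of that publish-once pattern, using sync/atomic's Value instead of this package's unsafe-pointer helpers (the names here are illustrative, not part of the package):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type info struct{ note string }

    type lazyCache struct{ v atomic.Value } // holds a *info once computed

    func (c *lazyCache) get(build func() *info) *info {
        if u, _ := c.v.Load().(*info); u != nil {
            return u // fast path: already published
        }
        u := build() // slow path: compute once, then publish
        c.v.Store(u)
        return u
    }

    func main() {
        var c lazyCache
        u := c.get(func() *info { return &info{note: "expensive reflection result"} })
        fmt.Println(u.note)
    }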
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when reqFields is all set
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist
+ bytesExtensions field // offset of XXX_extensions with type []byte
+ extensionRanges []ExtensionRange // extension ranges, or nil if it's not an extendable proto
+ isMessageSet bool // for legacy MessageSet support
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information to unmarshal a specific field of a message.
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // A single cached unmarshalInfo per message type keeps memory use down;
+ // all of its fields are write-once, so sharing it is safe.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.bytesExtensions.IsValid() {
+ z = m.offset(u.bytesExtensions).toBytes()
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
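
Every iteration of the loop above starts by reading a varint key whose low three bits are the wire type and whose remaining bits are the field number; the one- and two-byte special cases simply avoid the general decodeVarint call for the common small tags. A minimal standalone sketch of the same key split using the standard library:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Key for field number 16, wire type 2 (length-delimited):
        // 16<<3|2 = 130, which already needs a two-byte varint.
        b := []byte{0x82, 0x01}
        key, n := binary.Uvarint(b)
        fmt.Println(key>>3, key&7, n) // 16 2 2
    }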
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+ u.bytesExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
+ u.oldExtensions = toField(&f)
+ continue
+ } else if f.Type == reflect.TypeOf(([]byte)(nil)) {
+ u.bytesExtensions = toField(&f)
+ continue
+ }
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+ if len(oneofFields) > 0 {
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ ctype := false
+ isTime := false
+ isDuration := false
+ isWktPointer := false
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ if strings.HasPrefix(tag, "customtype=") {
+ ctype = true
+ }
+ if tag == "stdtime" {
+ isTime = true
+ }
+ if tag == "stdduration" {
+ isDuration = true
+ }
+ if tag == "wktptr" {
+ isWktPointer = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ if ctype {
+ if reflect.PtrTo(t).Implements(customType) {
+ if slice {
+ return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
+ }
+ if pointer {
+ return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalCustom(getUnmarshalInfo(t), name)
+ } else {
+ panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+ }
+ }
+
+ if isTime {
+ if pointer {
+ if slice {
+ return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalTime(getUnmarshalInfo(t), name)
+ }
+
+ if isDuration {
+ if pointer {
+ if slice {
+ return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalDuration(getUnmarshalInfo(t), name)
+ }
+
+ if isWktPointer {
+ switch t.Kind() {
+ case reflect.Float64:
+ if pointer {
+ if slice {
+ return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Float32:
+ if pointer {
+ if slice {
+ return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Int64:
+ if pointer {
+ if slice {
+ return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Uint64:
+ if pointer {
+ if slice {
+ return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Int32:
+ if pointer {
+ if slice {
+ return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Uint32:
+ if pointer {
+ if slice {
+ return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.Bool:
+ if pointer {
+ if slice {
+ return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
+ case reflect.String:
+ if pointer {
+ if slice {
+ return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
+ case uint8SliceType:
+ if pointer {
+ if slice {
+ return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ if slice {
+ return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+ }
+ return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
+ default:
+ panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+ }
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessage(getUnmarshalInfo(t), name)
+ }
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
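
The WireBytes branch above handles packed repeated scalars: a single length-delimited payload holding back-to-back varints rather than one tag per element. A standalone sketch decoding such a payload with the standard library:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Packed body for a repeated int64 field: a length prefix of 4,
        // then the varints 1, 2 and 150 (150 takes two bytes: 0x96 0x01).
        b := []byte{0x04, 0x01, 0x02, 0x96, 0x01}
        length, n := binary.Uvarint(b)
        body := b[n : n+int(length)]
        var out []int64
        for len(body) > 0 {
            v, m := binary.Uvarint(body)
            out = append(out, int64(v))
            body = body[m:]
        }
        fmt.Println(out) // [1 2 150]
    }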
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
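
The expression int64(x>>1) ^ int64(x)<<63>>63 above undoes zigzag encoding, which sint fields use so that small negative values still fit in short varints. A worked sketch:

    package main

    import "fmt"

    func zigzagDecode64(x uint64) int64 {
        return int64(x>>1) ^ int64(x)<<63>>63
    }

    func main() {
        // Zigzag maps 0, -1, 1, -2, 2, ... onto 0, 1, 2, 3, 4, ...
        for _, x := range []uint64{0, 1, 2, 3, 4, 5} {
            fmt.Printf("%d -> %d\n", x, zigzagDecode64(x))
        }
        // Prints: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, 5 -> -3
    }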
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
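
As the note above says, a bool field accepts a varint of any length, not just the single byte a sensible encoder would emit; only the zero/non-zero distinction matters. A tiny sketch of an over-long (but legal) encoding of false:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // 0x80 0x00 is a wasteful but valid two-byte varint for 0.
        x, n := binary.Uvarint([]byte{0x80, 0x00})
        fmt.Println(x != 0, n) // false 2
    }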
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
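
Appending to emptyBuf[:] rather than to a nil slice is what keeps an empty-but-present bytes field distinguishable from an unset one: the result is always non-nil. A standalone sketch of the difference:

    package main

    import "fmt"

    var emptyBuf [0]byte

    func main() {
        var src []byte // a zero-length bytes payload from the wire
        viaNil := append([]byte(nil), src...)
        viaEmpty := append(emptyBuf[:], src...)
        fmt.Println(viaNil == nil, viaEmpty == nil) // true false
    }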
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ for _, t := range tagArray {
+ if strings.HasPrefix(t, "customtype=") {
+ valTags = append(valTags, t)
+ }
+ if t == "stdtime" {
+ valTags = append(valTags, t)
+ }
+ if t == "stdduration" {
+ valTags = append(valTags, t)
+ }
+ if t == "wktptr" {
+ valTags = append(valTags, t)
+ }
+ }
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
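
Each map entry arrives as a miniature two-field message, which is why the inner loop above dispatches on field 1 for the key and field 2 for the value. A sketch of what one map<string,int32> entry (key "id", value 7) looks like on the wire:

    package main

    import "fmt"

    func main() {
        entry := []byte{0x0a, 0x02, 'i', 'd', 0x10, 0x07}
        fmt.Printf("field %d, wire %d\n", entry[0]>>3, entry[0]&7) // field 1, wire 2 (key)
        fmt.Printf("key %q\n", entry[2:2+entry[1]])                // key "id"
        fmt.Printf("field %d, wire %d\n", entry[4]>>3, entry[4]&7) // field 2, wire 0 (value)
        fmt.Printf("value %d\n", entry[5])                         // value 7
    }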
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
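
At runtime a oneof field is an interface whose dynamic value is a generated one-field wrapper struct; the closure above unmarshals into that single field and then stores the wrapper pointer into the interface. A hedged sketch of the typical generated shape (type names hypothetical, mirroring the Msg/F/X example in the comment above):

    package main

    import "fmt"

    type isMsg_F interface{ isMsg_F() }

    type Msg_X struct{ X int64 } // wrapper for the "X" case

    func (*Msg_X) isMsg_F() {}

    type Msg struct{ F isMsg_F }

    func main() {
        m := Msg{F: &Msg_X{X: 42}}
        if x, ok := m.F.(*Msg_X); ok {
            fmt.Println(x.X) // 42
        }
    }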
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) == 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
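
decodeVarint above is hand-unrolled, but the format is the ordinary base-128 varint: seven payload bits per byte, least-significant group first, high bit set on every byte except the last. A worked sketch with the standard library equivalent:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // 300 = 0b1_0010_1100: low seven bits 0101100 -> 0xac (with the
        // continuation bit set), remaining bits 10 -> 0x02.
        x, n := binary.Uvarint([]byte{0xac, 0x02})
        fmt.Println(x, n) // 300 2
    }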
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
new file mode 100644
index 0000000..00d6c7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
@@ -0,0 +1,385 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "io"
+ "reflect"
+)
+
+func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f // gogo: changed from v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.New(sub.typ))
+ m := s.Interface().(custom)
+ if err := m.Unmarshal(b[:x]); err != nil {
+ return nil, err
+ }
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := reflect.New(sub.typ)
+ c := m.Interface().(custom)
+ if err := c.Unmarshal(b[:x]); err != nil {
+ return nil, err
+ }
+ v := valToPointer(m)
+ f.appendRef(v, sub.typ)
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ m := f.asPointerTo(sub.typ).Interface().(custom)
+ if err := m.Unmarshal(b[:x]); err != nil {
+ return nil, err
+ }
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &timestamp{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ t, err := timestampFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(t))
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &timestamp{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ t, err := timestampFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&t))
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &timestamp{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ t, err := timestampFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &timestamp{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ t, err := timestampFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(t))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &duration{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ d, err := durationFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&d))
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &duration{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ d, err := durationFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(d))
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &duration{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ d, err := durationFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &duration{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ d, err := durationFromProto(m)
+ if err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(d))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
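
Every makeUnmarshal* closure above begins with the same length-delimited prologue: decode a varint length, check it against the remaining buffer, and hand the payload to a sub-unmarshaler. The following is a minimal standalone sketch of that prologue with its own varint decoder; the helper name and error texts are illustrative and not part of the vendored package.

package example

import "errors"

// readLengthDelimited mirrors the prologue shared by the closures above:
// decode a varint length, bounds-check it, and split the buffer into the
// field payload and the remaining bytes.
func readLengthDelimited(b []byte) (payload, rest []byte, err error) {
	var x uint64
	n := 0
	for {
		if n >= len(b) {
			return nil, nil, errors.New("unexpected EOF")
		}
		if n == 10 {
			return nil, nil, errors.New("varint overflows uint64")
		}
		c := b[n]
		x |= uint64(c&0x7f) << uint(7*n)
		n++
		if c < 0x80 {
			break
		}
	}
	b = b[n:]
	if x > uint64(len(b)) {
		return nil, nil, errors.New("unexpected EOF")
	}
	return b[:x], b[x:], nil
}

For example, readLengthDelimited([]byte{0x03, 'a', 'b', 'c', 0xff}) yields the payload "abc" and a remainder of the single byte 0xff.
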
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..87416af
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,930 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if len(props.Enum) > 0 {
+ if err := tm.writeEnum(w, v, props); err != nil {
+ return err
+ }
+ } else if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ if len(props.Enum) > 0 {
+ if err := tm.writeEnum(w, fv, props); err != nil {
+ return err
+ }
+ } else if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv
+ if pv.CanAddr() {
+ pv = sv.Addr()
+ } else {
+ pv = reflect.New(sv.Type())
+ pv.Elem().Set(sv)
+ }
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ if props != nil {
+ if len(props.CustomType) > 0 {
+ custom, ok := v.Interface().(Marshaler)
+ if ok {
+ data, err := custom.Marshal()
+ if err != nil {
+ return err
+ }
+ if err := writeString(w, string(data)); err != nil {
+ return err
+ }
+ return nil
+ }
+ } else if len(props.CastType) > 0 {
+ if _, ok := v.Interface().(interface {
+ String() string
+ }); ok {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ _, err := fmt.Fprintf(w, "%d", v.Interface())
+ return err
+ }
+ }
+ } else if props.StdTime {
+ t, ok := v.Interface().(time.Time)
+ if !ok {
+ return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+ }
+ tproto, err := timestampProto(t)
+ if err != nil {
+ return err
+ }
+ propsCopy := *props // Make a copy so that this is goroutine-safe
+ propsCopy.StdTime = false
+ err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+ return err
+ } else if props.StdDuration {
+ d, ok := v.Interface().(time.Duration)
+ if !ok {
+ return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
+ }
+ dproto := durationProto(d)
+ propsCopy := *props // Make a copy so that this is goroutine-safe
+ propsCopy.StdDuration = false
+ err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+ return err
+ }
+ }
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if v.Type().Implements(textMarshalerType) {
+ text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+ return ferr
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, werr := w.Write(endBraceNewline); werr != nil {
+ return werr
+ }
+ continue
+ }
+ if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+ return ferr
+ }
+ if wire != WireStartGroup {
+ if err = w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err = w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ e := pv.Interface().(Message)
+
+ var m map[int32]Extension
+ var mu sync.Locker
+ if em, ok := e.(extensionsBytes); ok {
+ eb := em.GetExtensions()
+ var err error
+ m, err = BytesToExtensionsMap(*eb)
+ if err != nil {
+ return err
+ }
+ mu = notLocker{}
+ } else if _, ok := e.(extendableProto); ok {
+ ep, _ := extendable(e)
+ m, mu = ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ }
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(e, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..1d6c6aa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,57 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+ m, ok := enumStringMaps[props.Enum]
+ if !ok {
+ // No string map registered for this enum;
+ // fall back to writing the numeric value.
+ return tm.writeAny(w, v, props)
+ }
+ key := int32(0)
+ if v.Kind() == reflect.Ptr {
+ key = int32(v.Elem().Int())
+ } else {
+ key = int32(v.Int())
+ }
+ s, ok := m[key]
+ if !ok {
+ // Value not present in the name map;
+ // fall back to writing the numeric value.
+ return tm.writeAny(w, v, props)
+ }
+ _, err := fmt.Fprint(w, s)
+ return err
+}
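
writeEnum prefers the name map registered by the generated code and falls back to the numeric representation when no mapping is available. The same lookup-or-fallback idea, reduced to a self-contained helper (the function name is illustrative only):

package example

import "strconv"

// enumText returns the symbolic name for val when the generated code
// supplied one, and the decimal value otherwise.
func enumText(names map[int32]string, val int32) string {
	if s, ok := names[val]; ok {
		return s
	}
	return strconv.FormatInt(int64(val), 10)
}
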
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f85c0cc
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ if props.StdTime {
+ fv := v
+ p.back()
+ props.StdTime = false
+ tproto := &timestamp{}
+ err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+ props.StdTime = true
+ if err != nil {
+ return err
+ }
+ tim, err := timestampFromProto(tproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ts := fv.Interface().([]*time.Time)
+ ts = append(ts, &tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ } else {
+ ts := fv.Interface().([]time.Time)
+ ts = append(ts, tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&tim))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+ }
+ return nil
+ }
+ if props.StdDuration {
+ fv := v
+ p.back()
+ props.StdDuration = false
+ dproto := &duration{}
+ err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+ props.StdDuration = true
+ if err != nil {
+ return err
+ }
+ dur, err := durationFromProto(dproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ds := fv.Interface().([]*time.Duration)
+ ds = append(ds, &dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ } else {
+ ds := fv.Interface().([]time.Duration)
+ ds = append(ds, dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&dur))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ ntok := p.next()
+ if ntok.err != nil {
+ return ntok.err
+ }
+ if ntok.value == "]" {
+ break
+ }
+ if ntok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", ntok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/f/0/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int8:
+ if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+ case reflect.Int16:
+ if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint8:
+ if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ case reflect.Uint16:
+ if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
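
A minimal usage sketch for the text parser above. The kv type is hypothetical and hand-rolled purely for illustration (any message carrying standard gogo/protobuf struct tags behaves the same); the bracketed tags value exercises the repeated-field list notation handled in readAny.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// kv is a hypothetical message, hand-rolled for illustration; generated
// messages carry the same protobuf struct tags.
type kv struct {
	Name string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Tags []string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"`
}

func (m *kv) Reset()         { *m = kv{} }
func (m *kv) String() string { return proto.CompactTextString(m) }
func (*kv) ProtoMessage()    {}

func main() {
	var m kv
	// Scalar fields use `name: value`; repeated fields also accept the
	// [v1, v2, ...] list notation parsed by readAny.
	if err := proto.UnmarshalText(`name: "example" tags: ["a", "b"]`, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.Name, m.Tags) // example [a b]
}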
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
new file mode 100644
index 0000000..9324f65
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because that corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
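
Both helpers above are unexported, so the following self-contained sketch only mirrors their arithmetic: a time.Time splits into whole Unix seconds plus a sub-second nanosecond remainder, and the pair converts back losslessly.

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2021, time.March, 1, 12, 30, 45, 123456789, time.UTC)

	// timestampProto's split: whole Unix seconds plus the nanosecond remainder.
	seconds := t.Unix()
	nanos := int32(t.Sub(time.Unix(seconds, 0))) // always in [0, 1e9)

	// timestampFromProto's rebuild.
	back := time.Unix(seconds, int64(nanos)).UTC()

	fmt.Println(seconds, nanos) // 1614601845 123456789
	fmt.Println(back.Equal(t))  // true
}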
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..38439fa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "reflect"
+ "time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset() { *m = timestamp{} }
+func (*timestamp) ProtoMessage() {}
+func (*timestamp) String() string { return "timestamp" }
+
+func init() {
+ RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
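
The init above registers the private timestamp mirror in the package's type registry under a fully qualified name; a small sketch of the corresponding lookup:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// RegisterType((*timestamp)(nil), ...) makes the reflect.Type recoverable by name.
	fmt.Println(proto.MessageType("gogo.protobuf.proto.timestamp")) // *proto.timestamp
}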
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..b175d1b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go
@@ -0,0 +1,1888 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "io"
+ "reflect"
+)
+
+func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*float64)
+ v := &float64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*float64)
+ v := &float64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+ v := &float64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+ v := &float64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(float64)
+ v := &float64Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(float64)
+ v := &float64Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*float64)
+ v := &float64Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*float64)
+ v := &float64Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
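
Every makeStd*Value* family in this file follows the same shape: wrap the raw Go scalar in the matching *Value message and emit it as one length-delimited field. A hand-computed sketch of the bytes appended for a single wrapped float64 (the outer field number 1, hence wiretag 0x0a, is only an assumption for illustration):

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	const v = 3.5

	// Embedded float64Value message: field 1, fixed64 -> tag 0x09 plus 8 little-endian bytes.
	inner := make([]byte, 9)
	inner[0] = 0x09
	binary.LittleEndian.PutUint64(inner[1:], math.Float64bits(v))

	// Outer field (field number 1 assumed): wiretag, varint length, then the embedded bytes.
	outer := append([]byte{0x0a, byte(len(inner))}, inner...)

	fmt.Printf("% x\n", outer) // 0a 09 09 00 00 00 00 00 00 0c 40
	// Matches the sizer above: tagsize(1) + SizeVarint(9)(1) + siz(9) = 11 bytes.
	fmt.Println(len(outer)) // 11
}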
+
+func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*float32)
+ v := &float32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*float32)
+ v := &float32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+ v := &float32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+ v := &float32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(float32)
+ v := &float32Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(float32)
+ v := &float32Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*float32)
+ v := &float32Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*float32)
+ v := &float32Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &float32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*int64)
+ v := &int64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*int64)
+ v := &int64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+ v := &int64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+ v := &int64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(int64)
+ v := &int64Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(int64)
+ v := &int64Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*int64)
+ v := &int64Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*int64)
+ v := &int64Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+ v := &uint64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+ v := &uint64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+ v := &uint64Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+ v := &uint64Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(uint64)
+ v := &uint64Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(uint64)
+ v := &uint64Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*uint64)
+ v := &uint64Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*uint64)
+ v := &uint64Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint64Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*int32)
+ v := &int32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*int32)
+ v := &int32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+ v := &int32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+ v := &int32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(int32)
+ v := &int32Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(int32)
+ v := &int32Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*int32)
+ v := &int32Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*int32)
+ v := &int32Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &int32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+ v := &uint32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+ v := &uint32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+ v := &uint32Value{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+ v := &uint32Value{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(uint32)
+ v := &uint32Value{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(uint32)
+ v := &uint32Value{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*uint32)
+ v := &uint32Value{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*uint32)
+ v := &uint32Value{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &uint32Value{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*bool)
+ v := &boolValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*bool)
+ v := &boolValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+ v := &boolValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+ v := &boolValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(bool)
+ v := &boolValue{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(bool)
+ v := &boolValue{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*bool)
+ v := &boolValue{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*bool)
+ v := &boolValue{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &boolValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &boolValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &boolValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &boolValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*string)
+ v := &stringValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*string)
+ v := &stringValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+ v := &stringValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+ v := &stringValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(string)
+ v := &stringValue{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(string)
+ v := &stringValue{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*string)
+ v := &stringValue{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*string)
+ v := &stringValue{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &stringValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &stringValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &stringValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &stringValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+ v := &bytesValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+ v := &bytesValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ if ptr.isNil() {
+ return 0
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+ v := &bytesValue{*t}
+ siz := Size(v)
+ return tagsize + SizeVarint(uint64(siz)) + siz
+ }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ if ptr.isNil() {
+ return b, nil
+ }
+ t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+ v := &bytesValue{*t}
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(buf)))
+ b = append(b, buf...)
+ return b, nil
+ }
+}
+
+func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(u.typ)
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().([]byte)
+ v := &bytesValue{t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(u.typ)
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().([]byte)
+ v := &bytesValue{t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ n := 0
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*[]byte)
+ v := &bytesValue{*t}
+ siz := Size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getSlice(reflect.PtrTo(u.typ))
+ for i := 0; i < s.Len(); i++ {
+ elem := s.Index(i)
+ t := elem.Interface().(*[]byte)
+ v := &bytesValue{*t}
+ siz := Size(v)
+ buf, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(siz))
+ b = append(b, buf...)
+ }
+
+ return b, nil
+ }
+}
+
+func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &bytesValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(sub.typ).Elem()
+ s.Set(reflect.ValueOf(m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &bytesValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+ s.Set(reflect.ValueOf(&m.Value))
+ return b[x:], nil
+ }
+}
+
+func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &bytesValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(reflect.PtrTo(sub.typ))
+ newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
+
+func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return nil, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ m := &bytesValue{}
+ if err := Unmarshal(b[:x], m); err != nil {
+ return nil, err
+ }
+ slice := f.getSlice(sub.typ)
+ newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+ slice.Set(newSlice)
+ return b[x:], nil
+ }
+}
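
The unmarshaler halves invert that layout: consume the length varint, bounds-check it, then decode the embedded *Value message and store its Value field. A sketch over the same illustrative bytes as above, decoding the embedded double by hand rather than through Unmarshal:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	b := []byte{0x0a, 0x09, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x40}

	b = b[1:]                 // the outer wiretag has already been consumed by the caller
	x, n := binary.Uvarint(b) // decodeVarint: length of the embedded message
	b = b[n:]
	if x > uint64(len(b)) {
		panic("unexpected EOF") // the io.ErrUnexpectedEOF case above
	}
	// Unmarshal(b[:x], &float64Value{}) amounts to: skip the inner tag 0x09,
	// read the fixed64 payload.
	v := math.Float64frombits(binary.LittleEndian.Uint64(b[1:x]))
	fmt.Println(v) // 3.5
}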
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
new file mode 100644
index 0000000..c1cf7bf
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
@@ -0,0 +1,113 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+type float64Value struct {
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float64Value) Reset() { *m = float64Value{} }
+func (*float64Value) ProtoMessage() {}
+func (*float64Value) String() string { return "float64" }
+
+type float32Value struct {
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float32Value) Reset() { *m = float32Value{} }
+func (*float32Value) ProtoMessage() {}
+func (*float32Value) String() string { return "float32" }
+
+type int64Value struct {
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int64Value) Reset() { *m = int64Value{} }
+func (*int64Value) ProtoMessage() {}
+func (*int64Value) String() string { return "int64" }
+
+type uint64Value struct {
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint64Value) Reset() { *m = uint64Value{} }
+func (*uint64Value) ProtoMessage() {}
+func (*uint64Value) String() string { return "uint64" }
+
+type int32Value struct {
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int32Value) Reset() { *m = int32Value{} }
+func (*int32Value) ProtoMessage() {}
+func (*int32Value) String() string { return "int32" }
+
+type uint32Value struct {
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint32Value) Reset() { *m = uint32Value{} }
+func (*uint32Value) ProtoMessage() {}
+func (*uint32Value) String() string { return "uint32" }
+
+type boolValue struct {
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *boolValue) Reset() { *m = boolValue{} }
+func (*boolValue) ProtoMessage() {}
+func (*boolValue) String() string { return "bool" }
+
+type stringValue struct {
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *stringValue) Reset() { *m = stringValue{} }
+func (*stringValue) ProtoMessage() {}
+func (*stringValue) String() string { return "string" }
+
+type bytesValue struct {
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *bytesValue) Reset() { *m = bytesValue{} }
+func (*bytesValue) ProtoMessage() {}
+func (*bytesValue) String() string { return "[]byte" }
+
+func init() {
+ RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
+ RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
+ RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
+ RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
+ RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
+ RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
+ RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
+ RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
+ RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
+}
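
The wrapper structs above mirror the protobuf well-known wrapper types: each carries a single `value` field numbered 1, and the makeStdBytesValue* marshalers earlier in this diff rely on exactly that layout when they wrap a `[]byte`. As a hedged illustration (not part of the vendored code), the sketch below hand-encodes that wire format with no proto dependency; the helper names `appendUvarint` and `encodeBytesValue` are made up for this example.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// appendUvarint encodes v as a protobuf base-128 varint (7 bits per byte, MSB = "more follows").
func appendUvarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

// encodeBytesValue hand-encodes a message shaped like bytesValue above:
// field number 1, wire type 2 (length-delimited), then the payload itself.
func encodeBytesValue(payload []byte) []byte {
	out := []byte{0x0a} // tag byte: (1 << 3) | 2
	out = appendUvarint(out, uint64(len(payload)))
	return append(out, payload...)
}

func main() {
	fmt.Println(hex.EncodeToString(encodeBytesValue([]byte("hi")))) // prints "0a026869"
}
```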
diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 0000000..ceadde6
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+ "sort"
+)
+
+func Strings(l []string) {
+ sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+ sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+ sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+ sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+ sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+ sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+ sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+ sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return p[j] }
+func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
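
sortkeys only provides typed wrappers around the standard `sort` package; gogo-generated code typically calls them to order map keys for deterministic output. A minimal usage sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/sortkeys"
)

func main() {
	keys := []uint64{42, 7, 19}
	sortkeys.Uint64s(keys) // in-place ascending sort via sort.Sort(Uint64Slice(keys))
	fmt.Println(keys)      // [7 19 42]

	names := []string{"b", "a", "c"}
	sortkeys.Strings(names) // thin wrapper around sort.Strings
	fmt.Println(names)      // [a b c]
}
```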
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
new file mode 100644
index 0000000..061d72a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - master
+
+script:
+ - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 0000000..97c1b34
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**, it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+ * If you are an individual writing original source code and you're sure you
+ own the intellectual property, then you'll need to sign an [individual
+ CLA][].
+
+ * If you work for a company that wants to allow you to contribute your work,
+ then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+ 1. It's generally best to start by opening a new issue describing the bug or
+ feature you're intending to fix. Even if you think it's relatively minor,
+ it's helpful to know what people are working on. Mention in the initial
+ issue that you are planning to work on that bug or feature so that it can
+ be assigned to you.
+
+ 1. Follow the normal process of [forking][] the project, and setup a new
+ branch to work in. It's important that each group of changes be done in
+ separate branches in order to ensure that a pull request only includes the
+ commits related to that bug or feature.
+
+ 1. Go makes it very simple to ensure properly formatted code, so always run
+ `go fmt` on your code before committing it. You should also run
+ [golint][] over your code. As noted in the [golint readme][], it's not
+ strictly necessary that your code be completely "lint-free", but this will
+ help you find common style issues.
+
+ 1. Any significant changes should almost always be accompanied by tests. The
+ project already has good test coverage, so look at some of the existing
+ tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+ are invaluable tools for seeing which parts of your code aren't being
+ exercised by your tests.
+
+ 1. Do your best to have [well-formed commit messages][] for each change.
+ This provides consistency throughout the project, and ensures that commit
+ messages are able to be formatted properly by various git tools.
+
+ 1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 0000000..b503aae
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,89 @@
+gofuzz
+======
+
+gofuzz is a library for populating go objects with random values.
+
+[GoDoc](https://godoc.org/github.com/google/gofuzz)
+[Travis Build Status](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```go
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+ A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+ A MyEnum = "A"
+ B MyEnum = "B"
+)
+type MyInfo struct {
+ Type MyEnum
+ AInfo *string
+ BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+ func(e *MyInfo, c fuzz.Continue) {
+ switch c.Intn(2) {
+ case 0:
+ e.Type = A
+ c.Fuzz(&e.AInfo)
+ case 1:
+ e.Type = B
+ c.Fuzz(&e.BInfo)
+ }
+ },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
+
+See more examples in ```example_test.go```.
+
+You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
+go-fuzz provides the user a byte-slice, which should be converted to different inputs
+for the tested function. This library can help convert the byte slice. Consider for
+example a fuzz test for the function `mypackage.MyFunc` that takes an int argument:
+```go
+// +build gofuzz
+package mypackage
+
+import fuzz "github.com/google/gofuzz"
+
+func Fuzz(data []byte) int {
+ var i int
+ fuzz.NewFromGoFuzz(data).Fuzz(&i)
+ MyFunc(i)
+ return 0
+}
+```
+
+Happy testing!
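
The README examples above exercise each knob separately. Below is a hedged sketch combining them on a nested struct, using `NewWithSeed` and `MaxDepth` from fuzz.go later in this diff so the run is reproducible; the `Config` type is invented for illustration.

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// Config is a hypothetical type used only to demonstrate the options.
type Config struct {
	Name    string
	Labels  map[string]string
	Retries *int
}

func main() {
	// A fixed seed makes the run reproducible; NilChance, NumElements and
	// MaxDepth bound how much structure gets generated.
	f := fuzz.NewWithSeed(1).
		NilChance(0.25).
		NumElements(1, 3).
		MaxDepth(5)

	var c Config
	f.Fuzz(&c)
	fmt.Printf("%+v\n", c)
}
```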
diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
new file mode 100644
index 0000000..5bb3659
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
+package bytesource
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "math/rand"
+)
+
+// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
+// generated from successive 8-byte chunks of the slice; once the input is exhausted, a
+// fallback pseudo-random source is used in case more random numbers are required.
+// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
+type ByteSource struct {
+ *bytes.Reader
+ fallback rand.Source
+}
+
+// New returns a new ByteSource from a given slice of bytes.
+func New(input []byte) *ByteSource {
+ s := &ByteSource{
+ Reader: bytes.NewReader(input),
+ fallback: rand.NewSource(0),
+ }
+ if len(input) > 0 {
+ s.fallback = rand.NewSource(int64(s.consumeUint64()))
+ }
+ return s
+}
+
+func (s *ByteSource) Uint64() uint64 {
+ // Return from input if it was not exhausted.
+ if s.Len() > 0 {
+ return s.consumeUint64()
+ }
+
+ // Input was exhausted, return random number from fallback (in this case fallback should not be
+ // nil). Try first having a Uint64 output (Should work in current rand implementation),
+ // otherwise return a conversion of Int63.
+ if s64, ok := s.fallback.(rand.Source64); ok {
+ return s64.Uint64()
+ }
+ return uint64(s.fallback.Int63())
+}
+
+func (s *ByteSource) Int63() int64 {
+ return int64(s.Uint64() >> 1)
+}
+
+func (s *ByteSource) Seed(seed int64) {
+ s.fallback = rand.NewSource(seed)
+ s.Reader = bytes.NewReader(nil)
+}
+
+// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that the
+// bytes reader is not empty.
+func (s *ByteSource) consumeUint64() uint64 {
+ var bytes [8]byte
+ _, err := s.Read(bytes[:])
+ if err != nil && err != io.EOF {
+ panic("failed reading source") // Should not happen.
+ }
+ return binary.BigEndian.Uint64(bytes[:])
+}
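
Although bytesource exists mainly to back `NewFromGoFuzz`, it satisfies `rand.Source`, so it can be plugged into `math/rand` directly. A small sketch under that assumption (the seed string is arbitrary):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	// The 8-byte chunks of the input drive the first values; once the input is
	// exhausted, the fallback source (seeded from the input) takes over.
	src := bytesource.New([]byte("deterministic seed bytes"))
	r := rand.New(src)

	fmt.Println(r.Intn(100), r.Intn(100)) // same input bytes -> same sequence
}
```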
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 0000000..9f9956d
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating go objects with random values.
+package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 0000000..761520a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,605 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+ "fmt"
+ "math/rand"
+ "reflect"
+ "regexp"
+ "time"
+
+ "github.com/google/gofuzz/bytesource"
+ "strings"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+ fuzzFuncs fuzzFuncMap
+ defaultFuzzFuncs fuzzFuncMap
+ r *rand.Rand
+ nilChance float64
+ minElements int
+ maxElements int
+ maxDepth int
+ skipFieldPatterns []*regexp.Regexp
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+ return NewWithSeed(time.Now().UnixNano())
+}
+
+func NewWithSeed(seed int64) *Fuzzer {
+ f := &Fuzzer{
+ defaultFuzzFuncs: fuzzFuncMap{
+ reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+ },
+
+ fuzzFuncs: fuzzFuncMap{},
+ r: rand.New(rand.NewSource(seed)),
+ nilChance: .2,
+ minElements: 1,
+ maxElements: 10,
+ maxDepth: 100,
+ }
+ return f
+}
+
+// NewFromGoFuzz is a helper function that enables using gofuzz (this
+// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
+// fuzzing. Essentially, it enables translating the fuzzing bytes from
+// go-fuzz to any Go object using this library.
+//
+// This implementation promises a constant translation from a given slice of
+// bytes to the fuzzed objects. This promise will remain over future
+// versions of Go and of this library.
+//
+// Note: the returned Fuzzer should not be shared between multiple goroutines,
+// as its deterministic output will no longer be available.
+//
+// Example: use go-fuzz to test the function `MyFunc(int)` in the package
+// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
+//
+// // +build gofuzz
+// package mypackage
+// import fuzz "github.com/google/gofuzz"
+// func Fuzz(data []byte) int {
+// var i int
+// fuzz.NewFromGoFuzz(data).Fuzz(&i)
+// MyFunc(i)
+// return 0
+// }
+func NewFromGoFuzz(data []byte) *Fuzzer {
+ return New().RandSource(bytesource.New(data))
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChance option. For slices, it
+// doesn't make much sense to pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+ for i := range fuzzFuncs {
+ v := reflect.ValueOf(fuzzFuncs[i])
+ if v.Kind() != reflect.Func {
+ panic("Need only funcs!")
+ }
+ t := v.Type()
+ if t.NumIn() != 2 || t.NumOut() != 0 {
+ panic("Need 2 in and 0 out params!")
+ }
+ argT := t.In(0)
+ switch argT.Kind() {
+ case reflect.Ptr, reflect.Map:
+ default:
+ panic("fuzzFunc must take pointer or map type")
+ }
+ if t.In(1) != reflect.TypeOf(Continue{}) {
+ panic("fuzzFunc's second parameter must be type fuzz.Continue")
+ }
+ f.fuzzFuncs[argT] = v
+ }
+ return f
+}
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+ f.r = rand.New(s)
+ return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+ if p < 0 || p > 1 {
+ panic("p should be between 0 and 1, inclusive.")
+ }
+ f.nilChance = p
+ return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+ if atLeast > atMost {
+ panic("atLeast must be <= atMost")
+ }
+ if atLeast < 0 {
+ panic("atLeast must be >= 0")
+ }
+ f.minElements = atLeast
+ f.maxElements = atMost
+ return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+ if f.minElements == f.maxElements {
+ return f.minElements
+ }
+ return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+ return f.r.Float64() >= f.nilChance
+}
+
+// MaxDepth sets the maximum number of recursive fuzz calls that will be made
+// before stopping. This includes struct members, pointers, and map and slice
+// elements.
+func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
+ f.maxDepth = d
+ return f
+}
+
+// SkipFieldsWithPattern skips fields whose names match the supplied pattern. Call this
+// multiple times if needed. This is useful for skipping XXX_ fields generated by protobuf.
+func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
+ f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
+ return f
+}
+
+// Fuzz recursively fills all of obj's fields with something random. First
+// this tries to find a custom fuzz function (see Funcs). If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
+// there is a default fuzz function provided by this package. If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// This is safe for cyclic or tree-like structs, up to a limit. Use the
+// MaxDepth method to adjust how deep you need it to recurse.
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks,
+// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
+// fields.
+func (f *Fuzzer) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.fuzzWithContext(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ f.fuzzWithContext(v, flagNoCustomFuzz)
+}
+
+const (
+ // Do not try to find a custom fuzz function. Does not apply recursively.
+ flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
+ fc := &fuzzerContext{fuzzer: f}
+ fc.doFuzz(v, flags)
+}
+
+// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
+// be thread-safe.
+type fuzzerContext struct {
+ fuzzer *Fuzzer
+ curDepth int
+}
+
+func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
+ if fc.curDepth >= fc.fuzzer.maxDepth {
+ return
+ }
+ fc.curDepth++
+ defer func() { fc.curDepth-- }()
+
+ if !v.CanSet() {
+ return
+ }
+
+ if flags&flagNoCustomFuzz == 0 {
+ // Check for both pointer and non-pointer custom functions.
+ if v.CanAddr() && fc.tryCustom(v.Addr()) {
+ return
+ }
+ if fc.tryCustom(v) {
+ return
+ }
+ }
+
+ if fn, ok := fillFuncMap[v.Kind()]; ok {
+ fn(v, fc.fuzzer.r)
+ return
+ }
+
+ switch v.Kind() {
+ case reflect.Map:
+ if fc.fuzzer.genShouldFill() {
+ v.Set(reflect.MakeMap(v.Type()))
+ n := fc.fuzzer.genElementCount()
+ for i := 0; i < n; i++ {
+ key := reflect.New(v.Type().Key()).Elem()
+ fc.doFuzz(key, 0)
+ val := reflect.New(v.Type().Elem()).Elem()
+ fc.doFuzz(val, 0)
+ v.SetMapIndex(key, val)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Ptr:
+ if fc.fuzzer.genShouldFill() {
+ v.Set(reflect.New(v.Type().Elem()))
+ fc.doFuzz(v.Elem(), 0)
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Slice:
+ if fc.fuzzer.genShouldFill() {
+ n := fc.fuzzer.genElementCount()
+ v.Set(reflect.MakeSlice(v.Type(), n, n))
+ for i := 0; i < n; i++ {
+ fc.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Array:
+ if fc.fuzzer.genShouldFill() {
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ fc.doFuzz(v.Index(i), 0)
+ }
+ return
+ }
+ v.Set(reflect.Zero(v.Type()))
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ skipField := false
+ fieldName := v.Type().Field(i).Name
+ for _, pattern := range fc.fuzzer.skipFieldPatterns {
+ if pattern.MatchString(fieldName) {
+ skipField = true
+ break
+ }
+ }
+ if !skipField {
+ fc.doFuzz(v.Field(i), 0)
+ }
+ }
+ case reflect.Chan:
+ fallthrough
+ case reflect.Func:
+ fallthrough
+ case reflect.Interface:
+ fallthrough
+ default:
+ panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+ }
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
+ // First: see if we have a fuzz function for it.
+ doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
+ if !ok {
+ // Second: see if it can fuzz itself.
+ if v.CanInterface() {
+ intf := v.Interface()
+ if fuzzable, ok := intf.(Interface); ok {
+ fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
+ return true
+ }
+ }
+ // Finally: see if there is a default fuzz function.
+ doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
+ if !ok {
+ return false
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Ptr:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ case reflect.Map:
+ if v.IsNil() {
+ if !v.CanSet() {
+ return false
+ }
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ default:
+ return false
+ }
+
+ doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+ fc: fc,
+ Rand: fc.fuzzer.r,
+ })})
+ return true
+}
+
+// Interface represents an object that knows how to fuzz itself. Any time we
+// find a type that implements this interface we will delegate the act of
+// fuzzing itself.
+type Interface interface {
+ Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+ fc *fuzzerContext
+
+ // For convenience, Continue implements rand.Rand via embedding.
+ // Use this for generating any randomness if you want your fuzzing
+ // to be repeatable for a given seed.
+ *rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.fc.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance. This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+ v := reflect.ValueOf(obj)
+ if v.Kind() != reflect.Ptr {
+ panic("needed ptr!")
+ }
+ v = v.Elem()
+ c.fc.doFuzz(v, flagNoCustomFuzz)
+}
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+ return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+ return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+ return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+ v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+ v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+ var sec, nsec int64
+ // Allow for about 1000 years of random time values, which keeps things
+ // like JSON parsing reasonably happy.
+ sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+ c.Fuzz(&nsec)
+ *t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+ reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+ v.SetBool(randBool(r))
+ },
+ reflect.Int: fuzzInt,
+ reflect.Int8: fuzzInt,
+ reflect.Int16: fuzzInt,
+ reflect.Int32: fuzzInt,
+ reflect.Int64: fuzzInt,
+ reflect.Uint: fuzzUint,
+ reflect.Uint8: fuzzUint,
+ reflect.Uint16: fuzzUint,
+ reflect.Uint32: fuzzUint,
+ reflect.Uint64: fuzzUint,
+ reflect.Uintptr: fuzzUint,
+ reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(float64(r.Float32()))
+ },
+ reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+ v.SetFloat(r.Float64())
+ },
+ reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+ v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
+ },
+ reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+ v.SetComplex(complex(r.Float64(), r.Float64()))
+ },
+ reflect.String: func(v reflect.Value, r *rand.Rand) {
+ v.SetString(randString(r))
+ },
+ reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+ panic("unimplemented")
+ },
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+ return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+ Int63n(int64) int64
+}
+
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than First.
+type UnicodeRange struct {
+ First, Last rune
+}
+
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+ count := int64(ur.Last - ur.First + 1)
+ return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
+ ur.check()
+ return func(s *string, c Continue) {
+ *s = ur.randString(c.Rand)
+ }
+}
+
+// check verifies that the range is valid, i.e. that ur.First is not greater than
+// ur.Last, and panics otherwise.
+func (ur UnicodeRange) check() {
+ if ur.Last < ur.First {
+ panic("The last encoding must be greater than the first one.")
+ }
+}
+
+// randString of UnicodeRange makes a random string up to 20 characters long.
+// Each character is selected from ur (UnicodeRange).
+func (ur UnicodeRange) randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ sb := strings.Builder{}
+ sb.Grow(n)
+ for i := 0; i < n; i++ {
+ sb.WriteRune(ur.choose(r))
+ }
+ return sb.String()
+}
+
+// defaultUnicodeRanges is the default set of unicode ranges used when the user wants a
+// fuzzed string but has not set a CustomStringFuzzFunc().
+var defaultUnicodeRanges = UnicodeRanges{
+ {' ', '~'}, // ASCII characters
+ {'\u00a0', '\u02af'}, // Multi-byte encoded characters
+ {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from one of the ranges of ur(UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
+ // Check unicode ranges slice is empty.
+ if len(ur) == 0 {
+ panic("UnicodeRanges is empty.")
+ }
+ // if not empty, each range should be checked.
+ for i := range ur {
+ ur[i].check()
+ }
+ return func(s *string, c Continue) {
+ *s = ur.randString(c.Rand)
+ }
+}
+
+// randString of UnicodeRanges makes a random string up to 20 characters long.
+// Each character is selected from one of the ranges of ur (UnicodeRanges),
+// and each range has an equal probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand) string {
+ n := r.Intn(20)
+ sb := strings.Builder{}
+ sb.Grow(n)
+ for i := 0; i < n; i++ {
+ sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+ }
+ return sb.String()
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+ return defaultUnicodeRanges.randString(r)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+ return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
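
To show how `Funcs` and the `UnicodeRange` helpers compose, here is a hedged sketch that restricts one string field to printable ASCII while leaving other fields to the default fuzzers; the `Event` type is invented for illustration.

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// Event is a hypothetical type used only for this example.
type Event struct {
	ID      int64
	Message string
}

func main() {
	// Build a string fuzzer limited to printable ASCII from a UnicodeRange.
	ascii := fuzz.UnicodeRange{First: ' ', Last: '~'}
	asciiString := ascii.CustomStringFuzzFunc()

	f := fuzz.New().Funcs(
		func(e *Event, c fuzz.Continue) {
			c.Fuzz(&e.ID)              // default fuzzing for the ID
			asciiString(&e.Message, c) // constrained fuzzing for the message
		},
	)

	var e Event
	f.Fuzz(&e)
	fmt.Printf("%+v\n", e)
}
```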
diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod
new file mode 100644
index 0000000..8ec4fe9
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/go.mod
@@ -0,0 +1,3 @@
+module github.com/google/gofuzz
+
+go 1.12
diff --git a/vendor/github.com/gopherjs/gopherjs/LICENSE b/vendor/github.com/gopherjs/gopherjs/LICENSE
new file mode 100644
index 0000000..d496fef
--- /dev/null
+++ b/vendor/github.com/gopherjs/gopherjs/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013 Richard Musiol. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gopherjs/gopherjs/js/js.go b/vendor/github.com/gopherjs/gopherjs/js/js.go
new file mode 100644
index 0000000..3fbf1d8
--- /dev/null
+++ b/vendor/github.com/gopherjs/gopherjs/js/js.go
@@ -0,0 +1,168 @@
+// Package js provides functions for interacting with native JavaScript APIs. Calls to these functions are treated specially by GopherJS and translated directly to their corresponding JavaScript syntax.
+//
+// Use MakeWrapper to expose methods to JavaScript. When passing values directly, the following type conversions are performed:
+//
+// | Go type | JavaScript type | Conversions back to interface{} |
+// | --------------------- | --------------------- | ------------------------------- |
+// | bool | Boolean | bool |
+// | integers and floats | Number | float64 |
+// | string | String | string |
+// | []int8 | Int8Array | []int8 |
+// | []int16 | Int16Array | []int16 |
+// | []int32, []int | Int32Array | []int |
+// | []uint8 | Uint8Array | []uint8 |
+// | []uint16 | Uint16Array | []uint16 |
+// | []uint32, []uint | Uint32Array | []uint |
+// | []float32 | Float32Array | []float32 |
+// | []float64 | Float64Array | []float64 |
+// | all other slices | Array | []interface{} |
+// | arrays | see slice type | see slice type |
+// | functions | Function | func(...interface{}) *js.Object |
+// | time.Time | Date | time.Time |
+// | - | instanceof Node | *js.Object |
+// | maps, structs | instanceof Object | map[string]interface{} |
+//
+// Additionally, for a struct containing a *js.Object field, only the content of the field will be passed to JavaScript and vice versa.
+package js
+
+// Object is a container for a native JavaScript object. Calls to its methods are treated specially by GopherJS and translated directly to their JavaScript syntax. A nil pointer to Object is equal to JavaScript's "null". Object can not be used as a map key.
+type Object struct{ object *Object }
+
+// Get returns the object's property with the given key.
+func (o *Object) Get(key string) *Object { return o.object.Get(key) }
+
+// Set assigns the value to the object's property with the given key.
+func (o *Object) Set(key string, value interface{}) { o.object.Set(key, value) }
+
+// Delete removes the object's property with the given key.
+func (o *Object) Delete(key string) { o.object.Delete(key) }
+
+// Length returns the object's "length" property, converted to int.
+func (o *Object) Length() int { return o.object.Length() }
+
+// Index returns the i'th element of an array.
+func (o *Object) Index(i int) *Object { return o.object.Index(i) }
+
+// SetIndex sets the i'th element of an array.
+func (o *Object) SetIndex(i int, value interface{}) { o.object.SetIndex(i, value) }
+
+// Call calls the object's method with the given name.
+func (o *Object) Call(name string, args ...interface{}) *Object { return o.object.Call(name, args...) }
+
+// Invoke calls the object itself. This will fail if it is not a function.
+func (o *Object) Invoke(args ...interface{}) *Object { return o.object.Invoke(args...) }
+
+// New creates a new instance of this type object. This will fail if it is not a function (constructor).
+func (o *Object) New(args ...interface{}) *Object { return o.object.New(args...) }
+
+// Bool returns the object converted to bool according to JavaScript type conversions.
+func (o *Object) Bool() bool { return o.object.Bool() }
+
+// String returns the object converted to string according to JavaScript type conversions.
+func (o *Object) String() string { return o.object.String() }
+
+// Int returns the object converted to int according to JavaScript type conversions (parseInt).
+func (o *Object) Int() int { return o.object.Int() }
+
+// Int64 returns the object converted to int64 according to JavaScript type conversions (parseInt).
+func (o *Object) Int64() int64 { return o.object.Int64() }
+
+// Uint64 returns the object converted to uint64 according to JavaScript type conversions (parseInt).
+func (o *Object) Uint64() uint64 { return o.object.Uint64() }
+
+// Float returns the object converted to float64 according to JavaScript type conversions (parseFloat).
+func (o *Object) Float() float64 { return o.object.Float() }
+
+// Interface returns the object converted to interface{}. See table in package comment for details.
+func (o *Object) Interface() interface{} { return o.object.Interface() }
+
+// Unsafe returns the object as an uintptr, which can be converted via unsafe.Pointer. Not intended for public use.
+func (o *Object) Unsafe() uintptr { return o.object.Unsafe() }
+
+// Error encapsulates JavaScript errors. Those are turned into a Go panic and may be recovered, giving an *Error that holds the JavaScript error object.
+type Error struct {
+ *Object
+}
+
+// Error returns the message of the encapsulated JavaScript error object.
+func (err *Error) Error() string {
+ return "JavaScript error: " + err.Get("message").String()
+}
+
+// Stack returns the stack property of the encapsulated JavaScript error object.
+func (err *Error) Stack() string {
+ return err.Get("stack").String()
+}
+
+// Global gives JavaScript's global object ("window" for browsers and "GLOBAL" for Node.js).
+var Global *Object
+
+// Module gives the value of the "module" variable set by Node.js. Hint: Set a module export with 'js.Module.Get("exports").Set("exportName", ...)'.
+var Module *Object
+
+// Undefined gives the JavaScript value "undefined".
+var Undefined *Object
+
+// Debugger gets compiled to JavaScript's "debugger;" statement.
+func Debugger() {}
+
+// InternalObject returns the internal JavaScript object that represents i. Not intended for public use.
+func InternalObject(i interface{}) *Object {
+ return nil
+}
+
+// MakeFunc wraps a function and gives access to the values of JavaScript's "this" and "arguments" keywords.
+func MakeFunc(fn func(this *Object, arguments []*Object) interface{}) *Object {
+ return Global.Call("$makeFunc", InternalObject(fn))
+}
+
+// Keys returns the keys of the given JavaScript object.
+func Keys(o *Object) []string {
+ if o == nil || o == Undefined {
+ return nil
+ }
+ a := Global.Get("Object").Call("keys", o)
+ s := make([]string, a.Length())
+ for i := 0; i < a.Length(); i++ {
+ s[i] = a.Index(i).String()
+ }
+ return s
+}
+
+// MakeWrapper creates a JavaScript object which has wrappers for the exported methods of i. Use explicit getter and setter methods to expose struct fields to JavaScript.
+func MakeWrapper(i interface{}) *Object {
+ v := InternalObject(i)
+ o := Global.Get("Object").New()
+ o.Set("__internal_object__", v)
+ methods := v.Get("constructor").Get("methods")
+ for i := 0; i < methods.Length(); i++ {
+ m := methods.Index(i)
+ if m.Get("pkg").String() != "" { // not exported
+ continue
+ }
+ o.Set(m.Get("name").String(), func(args ...*Object) *Object {
+ return Global.Call("$externalizeFunction", v.Get(m.Get("prop").String()), m.Get("typ"), true).Call("apply", v, args)
+ })
+ }
+ return o
+}
+
+// NewArrayBuffer creates a JavaScript ArrayBuffer from a byte slice.
+func NewArrayBuffer(b []byte) *Object {
+ slice := InternalObject(b)
+ offset := slice.Get("$offset").Int()
+ length := slice.Get("$length").Int()
+ return slice.Get("$array").Get("buffer").Call("slice", offset, offset+length)
+}
+
+// M is a simple map type. It is intended as a shorthand for JavaScript objects (before conversion).
+type M map[string]interface{}
+
+// S is a simple slice type. It is intended as a shorthand for JavaScript arrays (before conversion).
+type S []interface{}
+
+func init() {
+ // avoid dead code elimination
+ e := Error{}
+ _ = e
+}
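
These stubs only do real work when compiled with GopherJS, so the following is purely illustrative: a hedged sketch of exposing a Go type to JavaScript with `MakeWrapper`, following the hint in the `Module` doc comment above. The `Counter` type and the `newCounter` export name are invented for this example.

```go
//go:build js
// +build js

package main

import "github.com/gopherjs/gopherjs/js"

// Counter is a hypothetical type; its exported methods become callable from JavaScript.
type Counter struct {
	n int
}

func (c *Counter) Increment() int {
	c.n++
	return c.n
}

func main() {
	// As the js.Module doc comment suggests, attach an export so Node.js code can
	// require() this module, call newCounter(), and then invoke Increment on the wrapper.
	js.Module.Get("exports").Set("newCounter", func() *js.Object {
		return js.MakeWrapper(&Counter{})
	})
}
```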
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000..955dc0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000..1555653
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000..c8a9fbb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[[projects]]
+ name = "github.com/modern-go/reflect2"
+ packages = ["."]
+ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+ version = "1.0.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000..313a0f8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+ name = "github.com/modern-go/reflect2"
+ version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000..2cf4f5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000..c589add
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,85 @@
+[Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[GoDoc](https://pkg.go.dev/github.com/json-iterator/go)
+[Build Status](https://travis-ci.org/json-iterator/go)
+[codecov](https://codecov.io/gh/json-iterator/go)
+[Go Report Card](https://goreportcard.com/report/github.com/json-iterator/go)
+[License](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[Gitter chat](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+# Benchmark
+
+
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --------------- | ----------- | ---------------- | ---------------- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with the standard library
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import jsoniter "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+- [thockin](https://github.com/thockin)
+- [mattn](https://github.com/mattn)
+- [cch123](https://github.com/cch123)
+- [Oleg Shaldybin](https://github.com/olegshaldybin)
+- [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, email taowen@gmail.com, or chat on [Gitter](https://gitter.im/json-iterator/Lobby).
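To round out the Marshal/Unmarshal snippets above, here is a small, self-contained sketch of the streaming Decoder/Encoder path; the `Person` type and the sample JSON are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// Person is a hypothetical payload type used only for this example.
type Person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	var json = jsoniter.ConfigCompatibleWithStandardLibrary

	// Streaming decode, mirroring encoding/json's Decoder API.
	dec := json.NewDecoder(strings.NewReader(`{"name":"Ada","age":36}`))
	var p Person
	if err := dec.Decode(&p); err != nil {
		panic(err)
	}

	// Encode back to a string using the same frozen config.
	out, err := json.MarshalToString(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {"name":"Ada","age":36}
}
```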
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000..92d2cc4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+ "bytes"
+ "io"
+)
+
+// RawMessage is a drop-in replacement for json.RawMessage when using jsoniter in place of encoding/json
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+ return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenience method to read from a string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+ return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to get a value from a deeply nested JSON structure
+func Get(data []byte, path ...interface{}) Any {
+ return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v and adapts to the encoding/json Marshal API.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+ return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent is the same as json.MarshalIndent. The prefix argument is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenience method that returns a string instead of []byte
+func MarshalToString(v interface{}) (string, error) {
+ return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+ return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides an API identical to the encoding/json Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+ iter *Iterator
+}
+
+// Decode decodes the next JSON-encoded value from the input and stores it in obj
+func (adapter *Decoder) Decode(obj interface{}) error {
+ if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+ if !adapter.iter.loadMore() {
+ return io.EOF
+ }
+ }
+ adapter.iter.ReadVal(obj)
+ err := adapter.iter.Error
+ if err == io.EOF {
+ return nil
+ }
+ return adapter.iter.Error
+}
+
+// More reports whether there is another element in the current array or object being parsed
+func (adapter *Decoder) More() bool {
+ iter := adapter.iter
+ if iter.Error != nil {
+ return false
+ }
+ c := iter.nextToken()
+ if c == 0 {
+ return false
+ }
+ iter.unreadByte()
+ return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the Decoder's buffer
+func (adapter *Decoder) Buffered() io.Reader {
+ remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+ return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.UseNumber = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.DisallowUnknownFields = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+ return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+ stream *Stream
+}
+
+// Encode writes the JSON encoding of val to the underlying io.Writer, followed by a newline
+func (adapter *Encoder) Encode(val interface{}) error {
+ adapter.stream.WriteVal(val)
+ adapter.stream.WriteRaw("\n")
+ adapter.stream.Flush()
+ return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. Prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.IndentionStep = len(indent)
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping, which is enabled by default; pass false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.EscapeHTML = escapeHTML
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ return ConfigDefault.Valid(data)
+}
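The adapter above mirrors the encoding/json streaming API. Below is a hedged sketch of the Decoder/Encoder options it exposes; the `strictPayload` type and inputs are invented, and the output comments describe expected behavior.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// strictPayload is a hypothetical type used only for this example.
type strictPayload struct {
	ID int64 `json:"id"`
}

func main() {
	// DisallowUnknownFields makes decoding fail on unexpected keys,
	// like the encoding/json Decoder option of the same name.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"id":1,"extra":true}`))
	dec.DisallowUnknownFields()
	var p strictPayload
	fmt.Println(dec.Decode(&p) != nil) // true: "extra" is rejected

	// UseNumber keeps numbers as json.Number instead of float64
	// when decoding into interface{}.
	dec2 := jsoniter.NewDecoder(strings.NewReader(`{"id":9007199254740993}`))
	dec2.UseNumber()
	var m map[string]interface{}
	_ = dec2.Decode(&m)
	fmt.Printf("%T\n", m["id"]) // json.Number

	// The Encoder mirrors json.Encoder, including SetIndent and SetEscapeHTML.
	var buf bytes.Buffer
	enc := jsoniter.NewEncoder(&buf)
	enc.SetIndent("", "  ")
	enc.SetEscapeHTML(false)
	_ = enc.Encode(strictPayload{ID: 1})
	fmt.Print(buf.String())
}
```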
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000..f6b8aea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+ "errors"
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy JSON implementation holds []byte and parses it lazily.
+type Any interface {
+ LastError() error
+ ValueType() ValueType
+ MustBeValid() Any
+ ToBool() bool
+ ToInt() int
+ ToInt32() int32
+ ToInt64() int64
+ ToUint() uint
+ ToUint32() uint32
+ ToUint64() uint64
+ ToFloat32() float32
+ ToFloat64() float64
+ ToString() string
+ ToVal(val interface{})
+ Get(path ...interface{}) Any
+ Size() int
+ Keys() []string
+ GetInterface() interface{}
+ WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+ return 0
+}
+
+func (any *baseAny) Keys() []string {
+ return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+ panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface
+func WrapInt32(val int32) Any {
+ return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface
+func WrapInt64(val int64) Any {
+ return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface
+func WrapUint32(val uint32) Any {
+ return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface
+func WrapUint64(val uint64) Any {
+ return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface
+func WrapFloat64(val float64) Any {
+ return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface
+func WrapString(val string) Any {
+ return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into the Any interface
+func Wrap(val interface{}) Any {
+ if val == nil {
+ return &nilAny{}
+ }
+ asAny, isAny := val.(Any)
+ if isAny {
+ return asAny
+ }
+ typ := reflect2.TypeOf(val)
+ switch typ.Kind() {
+ case reflect.Slice:
+ return wrapArray(val)
+ case reflect.Struct:
+ return wrapStruct(val)
+ case reflect.Map:
+ return wrapMap(val)
+ case reflect.String:
+ return WrapString(val.(string))
+ case reflect.Int:
+ if strconv.IntSize == 32 {
+ return WrapInt32(int32(val.(int)))
+ }
+ return WrapInt64(int64(val.(int)))
+ case reflect.Int8:
+ return WrapInt32(int32(val.(int8)))
+ case reflect.Int16:
+ return WrapInt32(int32(val.(int16)))
+ case reflect.Int32:
+ return WrapInt32(val.(int32))
+ case reflect.Int64:
+ return WrapInt64(val.(int64))
+ case reflect.Uint:
+ if strconv.IntSize == 32 {
+ return WrapUint32(uint32(val.(uint)))
+ }
+ return WrapUint64(uint64(val.(uint)))
+ case reflect.Uintptr:
+ if ptrSize == 32 {
+ return WrapUint32(uint32(val.(uintptr)))
+ }
+ return WrapUint64(uint64(val.(uintptr)))
+ case reflect.Uint8:
+ return WrapUint32(uint32(val.(uint8)))
+ case reflect.Uint16:
+ return WrapUint32(uint32(val.(uint16)))
+ case reflect.Uint32:
+ return WrapUint32(uint32(val.(uint32)))
+ case reflect.Uint64:
+ return WrapUint64(val.(uint64))
+ case reflect.Float32:
+ return WrapFloat64(float64(val.(float32)))
+ case reflect.Float64:
+ return WrapFloat64(val.(float64))
+ case reflect.Bool:
+ if val.(bool) == true {
+ return &trueAny{}
+ }
+ return &falseAny{}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+ return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.unreadByte()
+ return &stringAny{baseAny{}, iter.ReadString()}
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return &nilAny{}
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ return &trueAny{}
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ return &falseAny{}
+ case '{':
+ return iter.readObjectAny()
+ case '[':
+ return iter.readArrayAny()
+ case '-':
+ return iter.readNumberAny(false)
+ case 0:
+ return &invalidAny{baseAny{}, errors.New("input is empty")}
+ default:
+ return iter.readNumberAny(true)
+ }
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipNumber()
+ lazyBuf := iter.stopCapture()
+ return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipObject()
+ lazyBuf := iter.stopCapture()
+ return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipArray()
+ lazyBuf := iter.stopCapture()
+ return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+ var found []byte
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ if field == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ return true
+ })
+ return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+ var found []byte
+ n := 0
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ if n == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ n++
+ return true
+ })
+ return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+ for i, pathKeyObj := range path {
+ switch pathKey := pathKeyObj.(type) {
+ case string:
+ valueBytes := locateObjectField(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int:
+ valueBytes := locateArrayElement(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int32:
+ if '*' == pathKey {
+ return iter.readAny().Get(path[i:]...)
+ }
+ return newInvalidAny(path[i:])
+ default:
+ return newInvalidAny(path[i:])
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return &invalidAny{baseAny{}, iter.Error}
+ }
+ return iter.readAny()
+}
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+type anyCodec struct {
+ valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ any := *(*Any)(ptr)
+ if any == nil {
+ stream.WriteNil()
+ return
+ }
+ any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ any := *(*Any)(ptr)
+ return any.Size() == 0
+}
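The Any interface and path lookup above are easiest to see end to end. Here is a brief sketch using an invented sample document; path elements may be object keys (string), array indexes (int), or the rune '*' (an int32) as a wildcard, and the output comments describe expected behavior.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Sample document invented for illustration.
	data := []byte(`{"users":[{"name":"Ada"},{"name":"Linus"}]}`)

	// Navigate with a mixed path of object keys and array indexes.
	name := jsoniter.Get(data, "users", 1, "name").ToString()
	fmt.Println(name) // Linus

	// '*' acts as a wildcard over array elements, collecting the matches.
	names := jsoniter.Get(data, "users", '*', "name")
	fmt.Println(names.Size(), names.Get(0).ToString()) // 2 Ada

	// Missing paths come back as an invalid Any; check ValueType or LastError.
	missing := jsoniter.Get(data, "users", 5, "name")
	fmt.Println(missing.ValueType() == jsoniter.InvalidValue, missing.LastError() != nil) // true true
}
```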
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000..0449e9a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type arrayLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateArrayElement(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ arr := make([]Any, 0)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ found := iter.readAny().Get(path[1:]...)
+ if found.ValueType() != InvalidValue {
+ arr = append(arr, found)
+ }
+ return true
+ })
+ return wrapArray(arr)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ size++
+ iter.Skip()
+ return true
+ })
+ return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type arrayAny struct {
+ baseAny
+ val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+ return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayAny) LastError() error {
+ return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+ return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToString() string {
+ str, _ := MarshalToString(any.val.Interface())
+ return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ if firstPath < 0 || firstPath >= any.val.Len() {
+ return newInvalidAny(path)
+ }
+ return Wrap(any.val.Index(firstPath).Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := make([]Any, 0)
+ for i := 0; i < any.val.Len(); i++ {
+ mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll = append(mappedAll, mapped)
+ }
+ }
+ return wrapArray(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000..9452324
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+ baseAny
+}
+
+func (any *trueAny) LastError() error {
+ return nil
+}
+
+func (any *trueAny) ToBool() bool {
+ return true
+}
+
+func (any *trueAny) ToInt() int {
+ return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+ return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+ return 1
+}
+
+func (any *trueAny) ToUint() uint {
+ return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+ return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+ return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+ return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+ return 1
+}
+
+func (any *trueAny) ToString() string {
+ return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+ stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+ return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+ return any
+}
+
+type falseAny struct {
+ baseAny
+}
+
+func (any *falseAny) LastError() error {
+ return nil
+}
+
+func (any *falseAny) ToBool() bool {
+ return false
+}
+
+func (any *falseAny) ToInt() int {
+ return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *falseAny) ToUint() uint {
+ return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *falseAny) ToString() string {
+ return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+ stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+ return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+ return any
+}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000..35fdb09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type floatAny struct {
+ baseAny
+ val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+ return any
+}
+
+func (any *floatAny) LastError() error {
+ return nil
+}
+
+func (any *floatAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+ return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+ if any.val > 0 {
+ return uint(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+ if any.val > 0 {
+ return uint32(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+ if any.val > 0 {
+ return uint64(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+ return any.val
+}
+
+func (any *floatAny) ToString() string {
+ return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+ stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000..1b56f39
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int32Any struct {
+ baseAny
+ val int32
+}
+
+func (any *int32Any) LastError() error {
+ return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+ return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+ stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000..c440d72
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int64Any struct {
+ baseAny
+ val int64
+}
+
+func (any *int64Any) LastError() error {
+ return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+ return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+ return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+ stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000..1d859ea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+ baseAny
+ err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+ return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+ return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+ return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+ panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+ return false
+}
+
+func (any *invalidAny) ToInt() int {
+ return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+ return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *invalidAny) ToString() string {
+ return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+ if any.err == nil {
+ return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000..d04cb54
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+ baseAny
+}
+
+func (any *nilAny) LastError() error {
+ return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+ return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+ return any
+}
+
+func (any *nilAny) ToBool() bool {
+ return false
+}
+
+func (any *nilAny) ToInt() int {
+ return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *nilAny) ToUint() uint {
+ return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *nilAny) ToString() string {
+ return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+ stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000..9d1e901
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+ "io"
+ "unsafe"
+)
+
+type numberLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *numberLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000..c44ef5c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type objectLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+ return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateObjectField(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ mapped := locatePath(iter, path[1:])
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[field] = mapped
+ }
+ return true
+ })
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectLazyAny) Keys() []string {
+ keys := []string{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ keys = append(keys, field)
+ return true
+ })
+ return keys
+}
+
+func (any *objectLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ size++
+ return true
+ })
+ return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type objectAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+ return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *objectAny) LastError() error {
+ return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+ return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+ return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ field := any.val.FieldByName(firstPath)
+ if !field.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(field.Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for i := 0; i < any.val.NumField(); i++ {
+ field := any.val.Field(i)
+ if field.CanInterface() {
+ mapped := Wrap(field.Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[any.val.Type().Field(i).Name] = mapped
+ }
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectAny) Keys() []string {
+ keys := make([]string, 0, any.val.NumField())
+ for i := 0; i < any.val.NumField(); i++ {
+ keys = append(keys, any.val.Type().Field(i).Name)
+ }
+ return keys
+}
+
+func (any *objectAny) Size() int {
+ return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
+
+type mapAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+ return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+ return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *mapAny) LastError() error {
+ return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+ return true
+}
+
+func (any *mapAny) ToInt() int {
+ return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *mapAny) ToUint() uint {
+ return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *mapAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for _, key := range any.val.MapKeys() {
+ keyAsStr := key.String()
+ element := Wrap(any.val.MapIndex(key).Interface())
+ mapped := element.Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[keyAsStr] = mapped
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ value := any.val.MapIndex(reflect.ValueOf(firstPath))
+ if !value.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(value.Interface())
+ }
+}
+
+func (any *mapAny) Keys() []string {
+ keys := make([]string, 0, any.val.Len())
+ for _, key := range any.val.MapKeys() {
+ keys = append(keys, key.String())
+ }
+ return keys
+}
+
+func (any *mapAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000..1f12f66
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type stringAny struct {
+ baseAny
+ val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+ return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+ return any
+}
+
+func (any *stringAny) LastError() error {
+ return nil
+}
+
+func (any *stringAny) ToBool() bool {
+ str := any.ToString()
+ if str == "0" {
+ return false
+ }
+ for _, c := range str {
+ switch c {
+ case ' ', '\n', '\r', '\t':
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func (any *stringAny) ToInt() int {
+ return int(any.ToInt64())
+
+}
+
+func (any *stringAny) ToInt32() int32 {
+ return int32(any.ToInt64())
+}
+
+func (any *stringAny) ToInt64() int64 {
+ if any.val == "" {
+ return 0
+ }
+
+ flag := 1
+ startPos := 0
+ if any.val[0] == '+' || any.val[0] == '-' {
+ startPos = 1
+ }
+
+ if any.val[0] == '-' {
+ flag = -1
+ }
+
+ endPos := startPos
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+ return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+ return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+ return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+ if any.val == "" {
+ return 0
+ }
+
+ startPos := 0
+
+ if any.val[0] == '-' {
+ return 0
+ }
+ if any.val[0] == '+' {
+ startPos = 1
+ }
+
+ endPos := startPos
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+ return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+ return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+ if len(any.val) == 0 {
+ return 0
+ }
+
+ // first char invalid
+ if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+ return 0
+ }
+
+ // extract valid num expression from string
+ // eg 123true => 123, -12.12xxa => -12.12
+ endPos := 1
+ for i := 1; i < len(any.val); i++ {
+ if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+ endPos = i + 1
+ continue
+ }
+
+ // end position is the first char which is not digit
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ endPos = i
+ break
+ }
+ }
+ parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+ return parsed
+}
+
+func (any *stringAny) ToString() string {
+ return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+ stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000..656bbd3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint32Any struct {
+ baseAny
+ val uint32
+}
+
+func (any *uint32Any) LastError() error {
+ return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+ return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+ stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000..7df2fce
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint64Any struct {
+ baseAny
+ val uint64
+}
+
+func (any *uint64Any) LastError() error {
+ return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+ return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+ return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+ stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 0000000..b45ef68
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+ mkdir -p /tmp/build-golang/src/github.com/json-iterator
+ ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000..2adcdc3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/modern-go/concurrent"
+ "github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API behaves.
+// An API is created from a Config by calling Froze.
+type Config struct {
+ IndentionStep int
+ MarshalFloatWith6Digits bool
+ EscapeHTML bool
+ SortMapKeys bool
+ UseNumber bool
+ DisallowUnknownFields bool
+ TagKey string
+ OnlyTaggedField bool
+ ValidateJsonRawMessage bool
+ ObjectFieldMustBeSimpleString bool
+ CaseSensitive bool
+}
+
+// API is the public interface of this package,
+// primarily Marshal and Unmarshal.
+type API interface {
+ IteratorPool
+ StreamPool
+ MarshalToString(v interface{}) (string, error)
+ Marshal(v interface{}) ([]byte, error)
+ MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+ UnmarshalFromString(str string, v interface{}) error
+ Unmarshal(data []byte, v interface{}) error
+ Get(data []byte, path ...interface{}) Any
+ NewEncoder(writer io.Writer) *Encoder
+ NewDecoder(reader io.Reader) *Decoder
+ Valid(data []byte) bool
+ RegisterExtension(extension Extension)
+ DecoderOf(typ reflect2.Type) ValDecoder
+ EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault is the default API
+var ConfigDefault = Config{
+ EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals floats with only 6 digits of precision
+var ConfigFastest = Config{
+ EscapeHTML: false,
+ MarshalFloatWith6Digits: true, // will lose precision
+ ObjectFieldMustBeSimpleString: true, // do not unescape object field
+}.Froze()
+
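As a rough usage sketch (not part of the vendored source), the prebuilt APIs above and a custom Config can be frozen and used interchangeably; the sample value below is invented.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Pick one of the prebuilt frozen APIs...
	fast := jsoniter.ConfigFastest
	compat := jsoniter.ConfigCompatibleWithStandardLibrary

	// ...or freeze a custom Config; SortMapKeys gives deterministic map output.
	custom := jsoniter.Config{EscapeHTML: true, SortMapKeys: true, IndentionStep: 2}.Froze()

	v := map[string]float64{"pi": 3.14159265}
	a, _ := fast.MarshalToString(v)   // 6-digit floats, no HTML escaping
	b, _ := compat.MarshalToString(v) // matches encoding/json output
	c, _ := custom.MarshalToString(v) // indented output with sorted keys
	fmt.Println(a, b, c)
}
```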
+type frozenConfig struct {
+ configBeforeFrozen Config
+ sortMapKeys bool
+ indentionStep int
+ objectFieldMustBeSimpleString bool
+ onlyTaggedField bool
+ disallowUnknownFields bool
+ decoderCache *concurrent.Map
+ encoderCache *concurrent.Map
+ encoderExtension Extension
+ decoderExtension Extension
+ extraExtensions []Extension
+ streamPool *sync.Pool
+ iteratorPool *sync.Pool
+ caseSensitive bool
+}
+
+func (cfg *frozenConfig) initCache() {
+ cfg.decoderCache = concurrent.NewMap()
+ cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+ cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+ cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+ decoder, found := cfg.decoderCache.Load(cacheKey)
+ if found {
+ return decoder.(ValDecoder)
+ }
+ return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+ encoder, found := cfg.encoderCache.Load(cacheKey)
+ if found {
+ return encoder.(ValEncoder)
+ }
+ return nil
+}
+
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+ obj, found := cfgCache.Load(cfg)
+ if found {
+ return obj.(*frozenConfig)
+ }
+ return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+ cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API from the Config
+func (cfg Config) Froze() API {
+ api := &frozenConfig{
+ sortMapKeys: cfg.SortMapKeys,
+ indentionStep: cfg.IndentionStep,
+ objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+ onlyTaggedField: cfg.OnlyTaggedField,
+ disallowUnknownFields: cfg.DisallowUnknownFields,
+ caseSensitive: cfg.CaseSensitive,
+ }
+ api.streamPool = &sync.Pool{
+ New: func() interface{} {
+ return NewStream(api, nil, 512)
+ },
+ }
+ api.iteratorPool = &sync.Pool{
+ New: func() interface{} {
+ return NewIterator(api)
+ },
+ }
+ api.initCache()
+ encoderExtension := EncoderExtension{}
+ decoderExtension := DecoderExtension{}
+ if cfg.MarshalFloatWith6Digits {
+ api.marshalFloatWith6Digits(encoderExtension)
+ }
+ if cfg.EscapeHTML {
+ api.escapeHTML(encoderExtension)
+ }
+ if cfg.UseNumber {
+ api.useNumber(decoderExtension)
+ }
+ if cfg.ValidateJsonRawMessage {
+ api.validateJsonRawMessage(encoderExtension)
+ }
+ api.encoderExtension = encoderExtension
+ api.decoderExtension = decoderExtension
+ api.configBeforeFrozen = cfg
+ return api
+}
+
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+ api := getFrozenConfigFromCache(cfg)
+ if api != nil {
+ return api
+ }
+ api = cfg.Froze().(*frozenConfig)
+ for _, extension := range extraExtensions {
+ api.RegisterExtension(extension)
+ }
+ addFrozenConfigToCache(cfg, api)
+ return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+ encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+ rawMessage := *(*json.RawMessage)(ptr)
+ iter := cfg.BorrowIterator([]byte(rawMessage))
+ defer cfg.ReturnIterator(iter)
+ iter.Read()
+ if iter.Error != nil && iter.Error != io.EOF {
+ stream.WriteRaw("null")
+ } else {
+ stream.WriteRaw(string(rawMessage))
+ }
+ }, func(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+ }}
+ extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+ extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+ extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+ exitingValue := *((*interface{})(ptr))
+ if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
+ iter.ReadVal(exitingValue)
+ return
+ }
+ if iter.WhatIsNext() == NumberValue {
+ *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+ } else {
+ *((*interface{})(ptr)) = iter.Read()
+ }
+ }}
+}
+func (cfg *frozenConfig) getTagKey() string {
+ tagKey := cfg.configBeforeFrozen.TagKey
+ if tagKey == "" {
+ return "json"
+ }
+ return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+ cfg.extraExtensions = append(cfg.extraExtensions, extension)
+ copied := cfg.configBeforeFrozen
+ cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps 10**(-6) precision
+// for float variables for better performance.
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+ // for better performance
+ extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+ extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+ encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+ typeDecoders = map[string]ValDecoder{}
+ fieldDecoders = map[string]ValDecoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+ typeEncoders = map[string]ValEncoder{}
+ fieldEncoders = map[string]ValEncoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return "", stream.Error
+ }
+ return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return nil, stream.Error
+ }
+ result := stream.Buffer()
+ copied := make([]byte, len(result))
+ copy(copied, result)
+ return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ if prefix != "" {
+ panic("prefix is not supported")
+ }
+ for _, r := range indent {
+ if r != ' ' {
+ panic("indent can only be space")
+ }
+ }
+ newCfg := cfg.configBeforeFrozen
+ newCfg.IndentionStep = len(indent)
+ return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+ data := []byte(str)
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+ stream := NewStream(cfg, writer, 512)
+ return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+ iter := Parse(cfg, reader, 512)
+ return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.Skip()
+ return iter.Error == nil
+}
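Reviewer note: the frozen-config pattern added above is easiest to judge from the caller's side. The sketch below is illustrative only and is not part of this PR; the Config field values are arbitrary examples.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Freeze a Config into an API once and reuse it; Froze() builds the
	// stream/iterator pools and encoder/decoder caches shown in config.go.
	api := jsoniter.Config{
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
	}.Froze()

	out, err := api.MarshalToString(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {"a":1,"b":2} (keys sorted because SortMapKeys is set)
}
```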
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000..3095662
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float | string |
+| --- | --- | --- | --- | --- | --- |
+| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as origin |
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | 13.2 => 13 <br/> -1.1 => 0 | 12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 | same as origin |
+| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
+| object | true | 0 | 0 | 0 | original json |
+| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod
new file mode 100644
index 0000000..e817ccc
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.mod
@@ -0,0 +1,11 @@
+module github.com/json-iterator/go
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/google/gofuzz v1.0.0
+ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
+ github.com/modern-go/reflect2 v1.0.2
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
new file mode 100644
index 0000000..4b7bb8a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -0,0 +1,14 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000..29b31cf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,349 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// ValueType the type for JSON element
+type ValueType int
+
+const (
+ // InvalidValue invalid JSON element
+ InvalidValue ValueType = iota
+ // StringValue JSON element "string"
+ StringValue
+ // NumberValue JSON element 100 or 0.10
+ NumberValue
+ // NilValue JSON element null
+ NilValue
+ // BoolValue JSON element true or false
+ BoolValue
+ // ArrayValue JSON element []
+ ArrayValue
+ // ObjectValue JSON element {}
+ ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+ hexDigits = make([]byte, 256)
+ for i := 0; i < len(hexDigits); i++ {
+ hexDigits[i] = 255
+ }
+ for i := '0'; i <= '9'; i++ {
+ hexDigits[i] = byte(i - '0')
+ }
+ for i := 'a'; i <= 'f'; i++ {
+ hexDigits[i] = byte((i - 'a') + 10)
+ }
+ for i := 'A'; i <= 'F'; i++ {
+ hexDigits[i] = byte((i - 'A') + 10)
+ }
+ valueTypes = make([]ValueType, 256)
+ for i := 0; i < len(valueTypes); i++ {
+ valueTypes[i] = InvalidValue
+ }
+ valueTypes['"'] = StringValue
+ valueTypes['-'] = NumberValue
+ valueTypes['0'] = NumberValue
+ valueTypes['1'] = NumberValue
+ valueTypes['2'] = NumberValue
+ valueTypes['3'] = NumberValue
+ valueTypes['4'] = NumberValue
+ valueTypes['5'] = NumberValue
+ valueTypes['6'] = NumberValue
+ valueTypes['7'] = NumberValue
+ valueTypes['8'] = NumberValue
+ valueTypes['9'] = NumberValue
+ valueTypes['t'] = BoolValue
+ valueTypes['f'] = BoolValue
+ valueTypes['n'] = NilValue
+ valueTypes['['] = ArrayValue
+ valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values; they are stored in the iterator's Error field.
+type Iterator struct {
+ cfg *frozenConfig
+ reader io.Reader
+ buf []byte
+ head int
+ tail int
+ depth int
+ captureStartedAt int
+ captured []byte
+ Error error
+ Attachment interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: nil,
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// Parse creates an Iterator instance from io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: reader,
+ buf: make([]byte, bufSize),
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// ParseBytes creates an Iterator instance from byte array
+func ParseBytes(cfg API, input []byte) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: input,
+ head: 0,
+ tail: len(input),
+ depth: 0,
+ }
+}
+
+// ParseString creates an Iterator instance from string
+func ParseString(cfg API, input string) *Iterator {
+ return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration.
+func (iter *Iterator) Pool() IteratorPool {
+ return iter.cfg
+}
+
+// Reset reuses the iterator instance with another reader as its input.
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+ iter.reader = reader
+ iter.head = 0
+ iter.tail = 0
+ iter.depth = 0
+ return iter
+}
+
+// ResetBytes reuses the iterator instance with another byte slice as its input.
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+ iter.reader = nil
+ iter.buf = input
+ iter.head = 0
+ iter.tail = len(input)
+ iter.depth = 0
+ return iter
+}
+
+// WhatIsNext reports the ValueType of the next JSON element without consuming it.
+func (iter *Iterator) WhatIsNext() ValueType {
+ valueType := valueTypes[iter.nextToken()]
+ iter.unreadByte()
+ return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i
+ return false
+ }
+ return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+ c := iter.nextToken()
+ if c == ',' {
+ return false
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+ return true
+}
+
+func (iter *Iterator) nextToken() byte {
+ // a variation of skip whitespaces, returning the next non-whitespace token
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i + 1
+ return c
+ }
+ if !iter.loadMore() {
+ return 0
+ }
+ }
+}
+
+// ReportError records an error on the iterator instance, annotated with the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+ if iter.Error != nil {
+ if iter.Error != io.EOF {
+ return
+ }
+ }
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ peekEnd := iter.head + 10
+ if peekEnd > iter.tail {
+ peekEnd = iter.tail
+ }
+ parsing := string(iter.buf[peekStart:peekEnd])
+ contextStart := iter.head - 50
+ if contextStart < 0 {
+ contextStart = 0
+ }
+ contextEnd := iter.head + 50
+ if contextEnd > iter.tail {
+ contextEnd = iter.tail
+ }
+ context := string(iter.buf[contextStart:contextEnd])
+ iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+ operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer returns the current buffer as a string for debugging purposes.
+func (iter *Iterator) CurrentBuffer() string {
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+ string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+ if iter.head == iter.tail {
+ if iter.loadMore() {
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+ }
+ return 0
+ }
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+ if iter.reader == nil {
+ if iter.Error == nil {
+ iter.head = iter.tail
+ iter.Error = io.EOF
+ }
+ return false
+ }
+ if iter.captured != nil {
+ iter.captured = append(iter.captured,
+ iter.buf[iter.captureStartedAt:iter.tail]...)
+ iter.captureStartedAt = 0
+ }
+ for {
+ n, err := iter.reader.Read(iter.buf)
+ if n == 0 {
+ if err != nil {
+ if iter.Error == nil {
+ iter.Error = err
+ }
+ return false
+ }
+ } else {
+ iter.head = 0
+ iter.tail = n
+ return true
+ }
+ }
+}
+
+func (iter *Iterator) unreadByte() {
+ if iter.Error != nil {
+ return
+ }
+ iter.head--
+ return
+}
+
+// Read reads the next JSON element as a generic interface{}.
+func (iter *Iterator) Read() interface{} {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case StringValue:
+ return iter.ReadString()
+ case NumberValue:
+ if iter.cfg.configBeforeFrozen.UseNumber {
+ return json.Number(iter.readNumberAsString())
+ }
+ return iter.ReadFloat64()
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ return nil
+ case BoolValue:
+ return iter.ReadBool()
+ case ArrayValue:
+ arr := []interface{}{}
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ arr = append(arr, elem)
+ return true
+ })
+ return arr
+ case ObjectValue:
+ obj := map[string]interface{}{}
+ iter.ReadMapCB(func(Iter *Iterator, field string) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ obj[field] = elem
+ return true
+ })
+ return obj
+ default:
+ iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+ return nil
+ }
+}
+
+// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
+const maxDepth = 10000
+
+func (iter *Iterator) incrementDepth() (success bool) {
+ iter.depth++
+ if iter.depth <= maxDepth {
+ return true
+ }
+ iter.ReportError("incrementDepth", "exceeded max depth")
+ return false
+}
+
+func (iter *Iterator) decrementDepth() (success bool) {
+ iter.depth--
+ if iter.depth >= 0 {
+ return true
+ }
+ iter.ReportError("decrementDepth", "unexpected negative nesting")
+ return false
+}
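For reviewers less familiar with this vendored API: the Iterator added above is typically driven by hand as in the sketch below. This is not part of the PR; it assumes the package-level ConfigDefault API declared earlier in config.go, and the input JSON is arbitrary.

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, "two", null]`)
	for iter.ReadArray() {
		// WhatIsNext peeks at the next value's type without consuming it.
		switch iter.WhatIsNext() {
		case jsoniter.NumberValue:
			fmt.Println("number:", iter.ReadFloat64())
		case jsoniter.StringValue:
			fmt.Println("string:", iter.ReadString())
		default:
			fmt.Println("other:", iter.Read())
		}
	}
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println("parse error:", iter.Error)
	}
}
```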
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000..204fe0e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,64 @@
+package jsoniter
+
+// ReadArray reads an array delimiter and reports whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false // null
+ case '[':
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ return true
+ }
+ return false
+ case ']':
+ return false
+ case ',':
+ return true
+ default:
+ iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+ return
+ }
+}
+
+// ReadArrayCB reads an array, invoking the callback once per element.
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+ c := iter.nextToken()
+ if c == '[' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != ']' {
+ iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ return iter.decrementDepth()
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+ return false
+}
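The callback form above is the allocation-light way to walk arrays. A hedged sketch of its use (again assuming the ConfigDefault preset from config.go; the input is illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	sum := 0
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1,2,3,4]`)
	// ReadArrayCB invokes the callback once per element; returning false stops early.
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		sum += it.ReadInt()
		return true
	})
	fmt.Println(sum) // 10
}
```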
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000..8a3d8b6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,342 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+ "unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
+func init() {
+ floatDigits = make([]int8, 256)
+ for i := 0; i < len(floatDigits); i++ {
+ floatDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ floatDigits[i] = i - int8('0')
+ }
+ floatDigits[','] = endOfNumber
+ floatDigits[']'] = endOfNumber
+ floatDigits['}'] = endOfNumber
+ floatDigits[' '] = endOfNumber
+ floatDigits['\t'] = endOfNumber
+ floatDigits['\n'] = endOfNumber
+ floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat read big.Float
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ prec := 64
+ if len(str) > prec {
+ prec = len(str)
+ }
+ val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+ if err != nil {
+ iter.Error = err
+ return nil
+ }
+ return val
+}
+
+// ReadBigInt read big.Int
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ ret = big.NewInt(0)
+ var success bool
+ ret, success = ret.SetString(str, 10)
+ if !success {
+ iter.ReportError("ReadBigInt", "invalid big int")
+ return nil
+ }
+ return ret
+}
+
+// ReadFloat32 reads a float32.
+func (iter *Iterator) ReadFloat32() (ret float32) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat32()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat32()
+}
+
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat32", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat32", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat32", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float32(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float32(float64(value) / float64(pow10[decimalPlaces]))
+ }
+ // too many decimal places
+ return iter.readFloat32SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat32SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+ strBuf := [16]byte{}
+ str := strBuf[0:0]
+load_loop:
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ str = append(str, c)
+ continue
+ default:
+ iter.head = i
+ break load_loop
+ }
+ }
+ if !iter.loadMore() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ if len(str) == 0 {
+ iter.ReportError("readNumberAsString", "invalid number")
+ }
+ return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat32SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return float32(val)
+}
+
+// ReadFloat64 read float64
+func (iter *Iterator) ReadFloat64() (ret float64) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat64()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat64()
+}
+
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat64", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat64", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat64", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float64(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float64(value) / float64(pow10[decimalPlaces])
+ }
+ // too many decimal places
+ return iter.readFloat64SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat64SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ if value > maxFloat64 {
+ return iter.readFloat64SlowPath()
+ }
+ }
+ }
+ return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat64SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return val
+}
+
+func validateFloat(str string) string {
+ // strconv.ParseFloat is not validating `1.` or `1.e1`
+ if len(str) == 0 {
+ return "empty number"
+ }
+ if str[0] == '-' {
+ return "-- is not valid"
+ }
+ dotPos := strings.IndexByte(str, '.')
+ if dotPos != -1 {
+ if dotPos == len(str)-1 {
+ return "dot can not be last character"
+ }
+ switch str[dotPos+1] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ return "missing digit after dot"
+ }
+ }
+ return ""
+}
+
+// ReadNumber read json.Number
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+ return json.Number(iter.readNumberAsString())
+}
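When float64 precision is not enough, the slow-path helpers above surface through ReadBigFloat and ReadNumber. A small illustrative sketch (ConfigDefault assumed from config.go; the literals are arbitrary):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `3.14159265358979323846264338327950288`)
	// ReadBigFloat keeps more precision than ReadFloat64 for long literals.
	bf := iter.ReadBigFloat()
	fmt.Println(bf.Text('f', 30))

	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `12345678901234567890`)
	// ReadNumber defers conversion, like encoding/json's json.Number.
	fmt.Println(iter.ReadNumber().String())
}
```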
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000..d786a89
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,346 @@
+package jsoniter
+
+import (
+ "math"
+ "strconv"
+)
+
+var intDigits []int8
+
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+const maxFloat64 = 1<<53 - 1
+
+func init() {
+ intDigits = make([]int8, 256)
+ for i := 0; i < len(intDigits); i++ {
+ intDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ intDigits[i] = i - int8('0')
+ }
+}
+
+// ReadUint read uint
+func (iter *Iterator) ReadUint() uint {
+ if strconv.IntSize == 32 {
+ return uint(iter.ReadUint32())
+ }
+ return uint(iter.ReadUint64())
+}
+
+// ReadInt read int
+func (iter *Iterator) ReadInt() int {
+ if strconv.IntSize == 32 {
+ return int(iter.ReadInt32())
+ }
+ return int(iter.ReadInt64())
+}
+
+// ReadInt8 read int8
+func (iter *Iterator) ReadInt8() (ret int8) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt8+1 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int8(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt8 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int8(val)
+}
+
+// ReadUint8 read uint8
+func (iter *Iterator) ReadUint8() (ret uint8) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint8 {
+ iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint8(val)
+}
+
+// ReadInt16 read int16
+func (iter *Iterator) ReadInt16() (ret int16) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt16+1 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int16(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt16 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int16(val)
+}
+
+// ReadUint16 read uint16
+func (iter *Iterator) ReadUint16() (ret uint16) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint16 {
+ iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint16(val)
+}
+
+// ReadInt32 read int32
+func (iter *Iterator) ReadInt32() (ret int32) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt32+1 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int32(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt32 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int32(val)
+}
+
+// ReadUint32 read uint32
+func (iter *Iterator) ReadUint32() (ret uint32) {
+ return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+		iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
+ return
+ }
+ value := uint32(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint32(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint32(ind2)*10 + uint32(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint32SafeToMultiply10 {
+ value2 := (value << 3) + (value << 1) + uint32(ind)
+ if value2 < value {
+ iter.ReportError("readUint32", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint32(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+// ReadInt64 read int64
+func (iter *Iterator) ReadInt64() (ret int64) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint64(iter.readByte())
+ if val > math.MaxInt64+1 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return -int64(val)
+ }
+ val := iter.readUint64(c)
+ if val > math.MaxInt64 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return int64(val)
+}
+
+// ReadUint64 read uint64
+func (iter *Iterator) ReadUint64() uint64 {
+ return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+		iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
+ return
+ }
+ value := uint64(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint64(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint64(ind2)*10 + uint64(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint64SafeToMultiple10 {
+ value2 := (value << 3) + (value << 1) + uint64(ind)
+ if value2 < value {
+ iter.ReportError("readUint64", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+func (iter *Iterator) assertInteger() {
+ if iter.head < iter.tail && iter.buf[iter.head] == '.' {
+ iter.ReportError("assertInteger", "can not decode float as int")
+ }
+}
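The width-specific readers above reject out-of-range values instead of silently truncating them. A sketch of what that looks like from the caller's side (illustrative input, ConfigDefault assumed):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `300`)
	v := iter.ReadInt8()           // 300 does not fit in an int8
	fmt.Println(v)                 // 0
	fmt.Println(iter.Error != nil) // true: the overflow is reported via iter.Error
}
```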
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000..58ee89c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,267 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns an empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return "" // null
+ case '{':
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ }
+ if c == '}' {
+ return "" // end of object
+ }
+ iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+ return
+ case ',':
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ case '}':
+ return "" // end of object
+ default:
+ iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+ return
+ }
+}
+
+// readFieldHash reads the next object field name and returns its FNV-1a hash,
+// lower-casing ASCII letters unless the config is case-sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+ hash := int64(0x811c9dc5)
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+ return 0
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ b := iter.buf[i]
+ if b == '\\' {
+ iter.head = i
+ for _, b := range iter.readStringSlowPath() {
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if b == '"' {
+ iter.head = i + 1
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ if !iter.loadMore() {
+ iter.ReportError("readFieldHash", `incomplete field name`)
+ return 0
+ }
+ }
+}
+
+func calcHash(str string, caseSensitive bool) int64 {
+ if !caseSensitive {
+ str = strings.ToLower(str)
+ }
+ hash := int64(0x811c9dc5)
+ for _, b := range []byte(str) {
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ return int64(hash)
+}
+
+// ReadObjectCB reads an object with a callback; the key must be ASCII-only and the field name is not copied.
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ var field string
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadObjectCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
+
+// ReadMapCB reads a map with a callback; the key can be any string.
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectStart() bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '}' {
+ return false
+ }
+ iter.unreadByte()
+ return true
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false
+ }
+ iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+ str := iter.ReadStringAsSlice()
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if iter.buf[iter.head] != ':' {
+ iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+ return
+ }
+ iter.head++
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if ret == nil {
+ return str
+ }
+ return ret
+}
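ReadObjectCB and ReadMapCB are the streaming counterparts to decoding into a map. A hedged usage sketch (ConfigDefault assumed; the document and field names are made up for illustration):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	fields := map[string]float64{}
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"x": 1.5, "y": 2.5}`)
	// ReadMapCB hands each key to the callback; the value is read inside it.
	iter.ReadMapCB(func(it *jsoniter.Iterator, key string) bool {
		fields[key] = it.ReadFloat64()
		return true
	})
	fmt.Println(fields) // map[x:1.5 y:2.5]
}
```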
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000..e91eefb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,130 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil reads a JSON null and
+// reports whether the next value was null.
+func (iter *Iterator) ReadNil() (ret bool) {
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return true
+ }
+ iter.unreadByte()
+ return false
+}
+
+// ReadBool reads a JSON boolean value.
+func (iter *Iterator) ReadBool() (ret bool) {
+ c := iter.nextToken()
+ if c == 't' {
+ iter.skipThreeBytes('r', 'u', 'e')
+ return true
+ }
+ if c == 'f' {
+ iter.skipFourBytes('a', 'l', 's', 'e')
+ return false
+ }
+ iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+ return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The []byte can be kept; it is a copy of the data.
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+ iter.startCapture(iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
+
+// SkipAndAppendBytes skips next JSON element and appends its content to
+// buffer, returning the result.
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
+ iter.startCaptureTo(buf, iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
+
+func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
+ if iter.captured != nil {
+ panic("already in capture mode")
+ }
+ iter.captureStartedAt = captureStartedAt
+ iter.captured = buf
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+ iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
+}
+
+func (iter *Iterator) stopCapture() []byte {
+ if iter.captured == nil {
+ panic("not in capture mode")
+ }
+ captured := iter.captured
+ remaining := iter.buf[iter.captureStartedAt:iter.head]
+ iter.captureStartedAt = -1
+ iter.captured = nil
+ return append(captured, remaining...)
+}
+
+// Skip skips one JSON value and positions the iterator at the next one.
+func (iter *Iterator) Skip() {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.skipString()
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ case '0':
+ iter.unreadByte()
+ iter.ReadFloat32()
+ case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.skipNumber()
+ case '[':
+ iter.skipArray()
+ case '{':
+ iter.skipObject()
+ default:
+ iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+ return
+ }
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b4 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+}
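Skip and SkipAndReturnBytes are useful for pulling one field out of a large document without decoding the rest. A sketch under the same assumptions as the earlier examples (ConfigDefault, made-up input):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	raw := `{"meta": {"big": [1,2,3]}, "id": 42}`
	var (
		id   int
		meta []byte
	)
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, raw)
	iter.ReadObjectCB(func(it *jsoniter.Iterator, field string) bool {
		if field == "id" {
			id = it.ReadInt()
		} else {
			// SkipAndReturnBytes skips the value but keeps a copy of its raw JSON.
			meta = it.SkipAndReturnBytes()
		}
		return true
	})
	fmt.Println(id)           // 42
	fmt.Println(string(meta)) // the raw JSON of the skipped "meta" value
}
```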
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000..9303de4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,163 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// sloppy but faster implementation; it does not validate the input JSON
+
+func (iter *Iterator) skipNumber() {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\r', '\t', ',', '}', ']':
+ iter.head = i
+ return
+ }
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipArray() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '[': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+			case ']': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+			iter.ReportError("skipArray", "incomplete array")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipObject() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '{': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+			case '}': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete object")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipString() {
+ for {
+ end, escaped := iter.findStringEnd()
+ if end == -1 {
+ if !iter.loadMore() {
+ iter.ReportError("skipString", "incomplete string")
+ return
+ }
+ if escaped {
+ iter.head = 1 // skip the first char as last char read is \
+ }
+ } else {
+ iter.head = end
+ return
+ }
+ }
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// Tries to find the end of string
+// Support if string contains escaped quote symbols.
+func (iter *Iterator) findStringEnd() (int, bool) {
+ escaped := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ if !escaped {
+ return i + 1, false
+ }
+ j := i - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return i + 1, true
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+ }
+ } else if c == '\\' {
+ escaped = true
+ }
+ }
+ j := iter.tail - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return -1, false // do not end with \
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+
+ }
+ return -1, true // end with \
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000..6cf66d0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+)
+
+func (iter *Iterator) skipNumber() {
+ if !iter.trySkipNumber() {
+ iter.unreadByte()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = nil
+ iter.ReadBigFloat()
+ }
+ }
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+ dotFound := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ case '.':
+ if dotFound {
+ iter.ReportError("validateNumber", `more than one dot found in number`)
+ return true // already failed
+ }
+ if i+1 == iter.tail {
+ return false
+ }
+ c = iter.buf[i+1]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ iter.ReportError("validateNumber", `missing digit after dot`)
+ return true // already failed
+ }
+ dotFound = true
+ default:
+ switch c {
+ case ',', ']', '}', ' ', '\t', '\n', '\r':
+ if iter.head == i {
+ return false // if - without following digits
+ }
+ iter.head = i
+ return true // must be valid
+ }
+ return false // may be invalid
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipString() {
+ if !iter.trySkipString() {
+ iter.unreadByte()
+ iter.ReadString()
+ }
+}
+
+func (iter *Iterator) trySkipString() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ iter.head = i + 1
+ return true // valid
+ } else if c == '\\' {
+ return false
+ } else if c < ' ' {
+ iter.ReportError("trySkipString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return true // already failed
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipObject() {
+ iter.unreadByte()
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ return true
+ })
+}
+
+func (iter *Iterator) skipArray() {
+ iter.unreadByte()
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ iter.Skip()
+ return true
+ })
+}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000..adc487e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+ "fmt"
+ "unicode/utf16"
+)
+
+// ReadString reads a string from the iterator.
+func (iter *Iterator) ReadString() (ret string) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ ret = string(iter.buf[iter.head:i])
+ iter.head = i + 1
+ return ret
+ } else if c == '\\' {
+ break
+ } else if c < ' ' {
+ iter.ReportError("ReadString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return
+ }
+ }
+ return iter.readStringSlowPath()
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return ""
+ }
+ iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+ var str []byte
+ var c byte
+ for iter.Error == nil {
+ c = iter.readByte()
+ if c == '"' {
+ return string(str)
+ }
+ if c == '\\' {
+ c = iter.readByte()
+ str = iter.readEscapedChar(c, str)
+ } else {
+ str = append(str, c)
+ }
+ }
+ iter.ReportError("readStringSlowPath", "unexpected end of input")
+ return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+ switch c {
+ case 'u':
+ r := iter.readU4()
+ if utf16.IsSurrogate(r) {
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != '\\' {
+ iter.unreadByte()
+ str = appendRune(str, r)
+ return str
+ }
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != 'u' {
+ str = appendRune(str, r)
+ return iter.readEscapedChar(c, str)
+ }
+ r2 := iter.readU4()
+ if iter.Error != nil {
+ return nil
+ }
+ combined := utf16.DecodeRune(r, r2)
+ if combined == '\uFFFD' {
+ str = appendRune(str, r)
+ str = appendRune(str, r2)
+ } else {
+ str = appendRune(str, combined)
+ }
+ } else {
+ str = appendRune(str, r)
+ }
+ case '"':
+ str = append(str, '"')
+ case '\\':
+ str = append(str, '\\')
+ case '/':
+ str = append(str, '/')
+ case 'b':
+ str = append(str, '\b')
+ case 'f':
+ str = append(str, '\f')
+ case 'n':
+ str = append(str, '\n')
+ case 'r':
+ str = append(str, '\r')
+ case 't':
+ str = append(str, '\t')
+ default:
+ iter.ReportError("readEscapedChar",
+ `invalid escape char after \`)
+ return nil
+ }
+ return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte cannot be kept, as it will change after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ // for: field name, base64, number
+ if iter.buf[i] == '"' {
+ // fast path: reuse the underlying buffer
+ ret = iter.buf[iter.head:i]
+ iter.head = i + 1
+ return ret
+ }
+ }
+ readLen := iter.tail - iter.head
+ copied := make([]byte, readLen, readLen*2)
+ copy(copied, iter.buf[iter.head:iter.tail])
+ iter.head = iter.tail
+ for iter.Error == nil {
+ c := iter.readByte()
+ if c == '"' {
+ return copied
+ }
+ copied = append(copied, c)
+ }
+ return copied
+ }
+ iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
+func (iter *Iterator) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := iter.readByte()
+ if iter.Error != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+ return
+ }
+ }
+ return ret
+}
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p = append(p, byte(r))
+ return p
+ case i <= rune2Max:
+ p = append(p, t2|byte(r>>6))
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+ r = runeError
+ fallthrough
+ case i <= rune3Max:
+ p = append(p, t3|byte(r>>12))
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ default:
+ p = append(p, t4|byte(r>>18))
+ p = append(p, tx|byte(r>>12)&maskx)
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ }
+}
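As the comment on ReadStringAsSlice warns, the fast path returns a slice that aliases the iterator's internal buffer, so callers must copy before keeping it. A small illustrative sketch (ConfigDefault assumed; the input is arbitrary):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"abc" "def"`)
	s := iter.ReadStringAsSlice()     // may alias iter's internal buffer
	kept := append([]byte(nil), s...) // copy before reading further; with a streaming reader the buffer is reused
	_ = iter.ReadStringAsSlice()
	fmt.Println(string(kept)) // abc
}
```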
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000..c2934f9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627 and provides interfaces whose syntax is identical to the standard
+// library's encoding/json.
+// Converting from encoding/json to jsoniter requires no more than replacing
+// the package in imports and in variable type declarations (if any).
+// The jsoniter interfaces give 100% compatibility with code that uses the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// describes how Marshal/Unmarshal operate
+// between arbitrary or predefined JSON objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over the given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input only as required and gives
+// better performance.
+package jsoniter
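The package comment promises drop-in compatibility with encoding/json. A hedged sketch of what that migration looks like in practice; it assumes the ConfigCompatibleWithStandardLibrary preset declared earlier in config.go, and the struct is made up for illustration:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// json is a drop-in stand-in for the standard library's encoding/json entry points.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	data, _ := json.Marshal(point{X: 1, Y: 2})
	fmt.Println(string(data)) // {"x":1,"y":2}

	var p point
	_ = json.Unmarshal([]byte(`{"x":3,"y":4}`), &p)
	fmt.Println(p) // {3 4}
}
```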
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000..e2389b5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// IteratorPool is a thread-safe pool of iterators that share the same configuration.
+type IteratorPool interface {
+ BorrowIterator(data []byte) *Iterator
+ ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams that share the same configuration.
+type StreamPool interface {
+ BorrowStream(writer io.Writer) *Stream
+ ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+ stream := cfg.streamPool.Get().(*Stream)
+ stream.Reset(writer)
+ return stream
+}
+
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+ stream.out = nil
+ stream.Error = nil
+ stream.Attachment = nil
+ cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+ iter := cfg.iteratorPool.Get().(*Iterator)
+ iter.ResetBytes(data)
+ return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+ iter.Error = nil
+ iter.Attachment = nil
+ cfg.iteratorPool.Put(iter)
+}
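
The Borrow/Return pairs above are how a frozen configuration recycles Stream and Iterator values (backed by sync.Pool) instead of allocating per call. A hedged sketch of the intended usage pattern, using the pool methods exposed through the package's API type:

    package main

    import (
        "bytes"
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        cfg := jsoniter.ConfigDefault

        // Borrow a pooled stream, write with it, flush, then return it so its
        // buffer can be reused by later calls.
        var buf bytes.Buffer
        stream := cfg.BorrowStream(&buf)
        stream.WriteVal(map[string]int{"a": 1})
        stream.Flush()
        cfg.ReturnStream(stream)
        fmt.Println(buf.String())

        // The same pattern applies to iterators when decoding.
        iter := cfg.BorrowIterator(buf.Bytes())
        var out map[string]int
        iter.ReadVal(&out)
        cfg.ReturnIterator(iter)
        fmt.Println(out)
    }
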
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000..39acb32
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,337 @@
+package jsoniter
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to the cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Reflection on the type is used to create decoders, which are then cached.
+// Reflection on values is avoided where possible, since reflect.Value itself allocates, with the following exceptions:
+// 1. creating an instance of a new value, e.g. *int needs an int to be allocated
+// 2. appending to a slice when the existing capacity is not enough, which allocates via reflect.New
+// 3. assignment to a map, where both key and value will be reflect.Value
+// For a simple struct binding, decoding is reflect.Value free and allocation free.
+type ValDecoder interface {
+ Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to the cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+ Encode(ptr unsafe.Pointer, stream *Stream)
+}
+
+type checkIsEmpty interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+ *frozenConfig
+ prefix string
+ encoders map[reflect2.Type]ValEncoder
+ decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+ if b.frozenConfig == nil {
+ // default is case-insensitive
+ return false
+ }
+ return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+ return &ctx{
+ frozenConfig: b.frozenConfig,
+ prefix: b.prefix + " " + prefix,
+ encoders: b.encoders,
+ decoders: b.decoders,
+ }
+}
+
+// ReadVal copies the underlying JSON into a Go value, same as json.Unmarshal.
+func (iter *Iterator) ReadVal(obj interface{}) {
+ depth := iter.depth
+ cacheKey := reflect2.RTypeOf(obj)
+ decoder := iter.cfg.getDecoderFromCache(cacheKey)
+ if decoder == nil {
+ typ := reflect2.TypeOf(obj)
+ if typ == nil || typ.Kind() != reflect.Ptr {
+ iter.ReportError("ReadVal", "can only unmarshal into pointer")
+ return
+ }
+ decoder = iter.cfg.DecoderOf(typ)
+ }
+ ptr := reflect2.PtrOf(obj)
+ if ptr == nil {
+ iter.ReportError("ReadVal", "can not read into nil pointer")
+ return
+ }
+ decoder.Decode(ptr, iter)
+ if iter.depth != depth {
+ iter.ReportError("ReadVal", "unexpected mismatched nesting")
+ return
+ }
+}
+
+// WriteVal copies the Go value into the underlying JSON, same as json.Marshal.
+func (stream *Stream) WriteVal(val interface{}) {
+ if nil == val {
+ stream.WriteNil()
+ return
+ }
+ cacheKey := reflect2.RTypeOf(val)
+ encoder := stream.cfg.getEncoderFromCache(cacheKey)
+ if encoder == nil {
+ typ := reflect2.TypeOf(val)
+ encoder = stream.cfg.EncoderOf(typ)
+ }
+ encoder.Encode(reflect2.PtrOf(val), stream)
+}
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+ cacheKey := typ.RType()
+ decoder := cfg.getDecoderFromCache(cacheKey)
+ if decoder != nil {
+ return decoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder = decoderOfType(ctx, ptrType.Elem())
+ cfg.addDecoderToCache(cacheKey, decoder)
+ return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoders[typ]
+ if decoder != nil {
+ return decoder
+ }
+ placeholder := &placeholderDecoder{}
+ ctx.decoders[typ] = placeholder
+ decoder = _createDecoderOfType(ctx, typ)
+ placeholder.decoder = decoder
+ return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := createDecoderOfJsonRawMessage(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfJsonNumber(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfMarshaler(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfAny(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfNative(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ switch typ.Kind() {
+ case reflect.Interface:
+ ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+ if isIFace {
+ return &ifaceDecoder{valType: ifaceType}
+ }
+ return &efaceDecoder{}
+ case reflect.Struct:
+ return decoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return decoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return decoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return decoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return decoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+ cacheKey := typ.RType()
+ encoder := cfg.getEncoderFromCache(cacheKey)
+ if encoder != nil {
+ return encoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ encoder = encoderOfType(ctx, typ)
+ if typ.LikePtr() {
+ encoder = &onePtrEncoder{encoder}
+ }
+ cfg.addEncoderToCache(cacheKey, encoder)
+ return encoder
+}
+
+type onePtrEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoders[typ]
+ if encoder != nil {
+ return encoder
+ }
+ placeholder := &placeholderEncoder{}
+ ctx.encoders[typ] = placeholder
+ encoder = _createEncoderOfType(ctx, typ)
+ placeholder.encoder = encoder
+ return encoder
+}
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := createEncoderOfJsonRawMessage(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfJsonNumber(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfMarshaler(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfAny(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return encoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return encoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return encoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return encoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+type lazyErrorDecoder struct {
+ err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() != NilValue {
+ if iter.Error == nil {
+ iter.Error = decoder.err
+ }
+ } else {
+ iter.Skip()
+ }
+}
+
+type lazyErrorEncoder struct {
+ err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if ptr == nil {
+ stream.WriteNil()
+ } else if stream.Error == nil {
+ stream.Error = encoder.err
+ }
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type placeholderDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(ptr)
+}
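
ReadVal above refuses non-pointer targets and caches one decoder per concrete type (keyed by reflect2.RTypeOf), so the reflection work happens only on first use. A small illustrative sketch of the observable behaviour (mine, not part of the diff):

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    type point struct {
        X, Y int
    }

    func main() {
        data := []byte(`{"X":1,"Y":2}`)

        // A non-pointer target surfaces the "can only unmarshal into pointer" error.
        var p point
        if err := jsoniter.Unmarshal(data, p); err != nil {
            fmt.Println("non-pointer:", err)
        }

        // A pointer target works; the decoder built for *point is cached and reused.
        if err := jsoniter.Unmarshal(data, &p); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", p)
    }
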
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000..13a0b7b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ if arrayType.Len() == 0 {
+ return emptyArrayEncoder{}
+ }
+ encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return true
+}
+
+type arrayEncoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteArrayStart()
+ elemPtr := unsafe.Pointer(ptr)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ for i := 1; i < encoder.arrayType.Len(); i++ {
+ stream.WriteMore()
+ elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+ }
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type arrayDecoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+ }
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ arrayType := decoder.arrayType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ return
+ }
+ iter.unreadByte()
+ elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ if length >= arrayType.Len() {
+ iter.Skip()
+ continue
+ }
+ idx := length
+ length += 1
+ elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000..8b6bc8b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+type dynamicEncoder struct {
+ valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ pObj := (*interface{})(ptr)
+ obj := *pObj
+ if obj == nil {
+ *pObj = iter.Read()
+ return
+ }
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ *pObj = iter.Read()
+ return
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ ptrElemType := ptrType.Elem()
+ if iter.WhatIsNext() == NilValue {
+ if ptrElemType.Kind() != reflect.Ptr {
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *pObj = nil
+ return
+ }
+ }
+ if reflect2.IsNil(obj) {
+ obj := ptrElemType.New()
+ iter.ReadVal(obj)
+ *pObj = obj
+ return
+ }
+ iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+ valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+ return
+ }
+ obj := decoder.valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+ return
+ }
+ iter.ReadVal(obj)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000..74a97bf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+ "unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how the struct should be encoded/decoded.
+type StructDescriptor struct {
+ Type reflect2.Type
+ Fields []*Binding
+}
+
+// GetField gets one field from the descriptor by its name.
+// A map cannot be used here because field order must be preserved.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+ for _, binding := range structDescriptor.Fields {
+ if binding.Field.Name() == fieldName {
+ return binding
+ }
+ }
+ return nil
+}
+
+// Binding describes how the struct field should be encoded/decoded.
+type Binding struct {
+ levels []int
+ Field reflect2.StructField
+ FromNames []string
+ ToNames []string
+ Encoder ValEncoder
+ Decoder ValDecoder
+}
+
+// Extension is the single SPI for all customization. Customize encoding/decoding by specifying an alternate encoder/decoder.
+// Fields can also be renamed via UpdateStructDescriptor.
+type Extension interface {
+ UpdateStructDescriptor(structDescriptor *StructDescriptor)
+ CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+ CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+ CreateDecoder(typ reflect2.Type) ValDecoder
+ CreateEncoder(typ reflect2.Type) ValEncoder
+ DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+ DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
+
+// DummyExtension can be embedded to get a no-op implementation of every Extension method.
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder gets the encoder from the map
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder gets the decoder from the map
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type funcDecoder struct {
+ fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+ fun EncoderFunc
+ isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ if encoder.isEmptyFunc == nil {
+ return false
+ }
+ return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc is the function form of a TypeDecoder
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc is the function form of a TypeEncoder
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a TypeDecoder for a type using a function
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+ typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a TypeDecoder for a type
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+ typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field using a function
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+ RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a TypeDecoder for a struct field
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+ fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers a TypeEncoder for a type using encode/isEmpty functions
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a TypeEncoder for a type
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+ typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field using encode/isEmpty functions
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers a TypeEncoder for a struct field
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+ fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension
+func RegisterExtension(extension Extension) {
+ extensions = append(extensions, extension)
+}
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := _getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ }
+ return decoder
+}
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ for _, extension := range extensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ decoder := ctx.decoderExtension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ typeName := typ.String()
+ decoder = typeDecoders[typeName]
+ if decoder != nil {
+ return decoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder := typeDecoders[ptrType.Elem().String()]
+ if decoder != nil {
+ return &OptionalDecoder{ptrType.Elem(), decoder}
+ }
+ }
+ return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := _getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ }
+ return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ for _, extension := range extensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ encoder := ctx.encoderExtension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ typeName := typ.String()
+ encoder = typeEncoders[typeName]
+ if encoder != nil {
+ return encoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ typePtr := typ.(*reflect2.UnsafePtrType)
+ encoder := typeEncoders[typePtr.Elem().String()]
+ if encoder != nil {
+ return &OptionalEncoder{encoder}
+ }
+ }
+ return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+ structType := typ.(*reflect2.UnsafeStructType)
+ embeddedBindings := []*Binding{}
+ bindings := []*Binding{}
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+ if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
+ continue
+ }
+ if tag == "-" || field.Name() == "_" {
+ continue
+ }
+ tagParts := strings.Split(tag, ",")
+ if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+ if field.Type().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, field.Type())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ } else if field.Type().Kind() == reflect.Ptr {
+ ptrType := field.Type().(*reflect2.UnsafePtrType)
+ if ptrType.Elem().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, ptrType.Elem())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &dereferenceEncoder{binding.Encoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ }
+ }
+ }
+ fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+ fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+ decoder := fieldDecoders[fieldCacheKey]
+ if decoder == nil {
+ decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ encoder := fieldEncoders[fieldCacheKey]
+ if encoder == nil {
+ encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ binding := &Binding{
+ Field: field,
+ FromNames: fieldNames,
+ ToNames: fieldNames,
+ Decoder: decoder,
+ Encoder: encoder,
+ }
+ binding.levels = []int{i}
+ bindings = append(bindings, binding)
+ }
+ return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+ structDescriptor := &StructDescriptor{
+ Type: typ,
+ Fields: bindings,
+ }
+ for _, extension := range extensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+ ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+ for _, extension := range ctx.extraExtensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ processTags(structDescriptor, ctx.frozenConfig)
+ // merge normal & embedded bindings & sort with original order
+ allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+ sort.Sort(allBindings)
+ structDescriptor.Fields = allBindings
+ return structDescriptor
+}
+
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+ return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+ left := bindings[i].levels
+ right := bindings[j].levels
+ k := 0
+ for {
+ if left[k] < right[k] {
+ return true
+ } else if left[k] > right[k] {
+ return false
+ }
+ k++
+ }
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+ bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+ for _, binding := range structDescriptor.Fields {
+ shouldOmitEmpty := false
+ tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+ for _, tagPart := range tagParts[1:] {
+ if tagPart == "omitempty" {
+ shouldOmitEmpty = true
+ } else if tagPart == "string" {
+ if binding.Field.Type().Kind() == reflect.String {
+ binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+ binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+ } else {
+ binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+ binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+ }
+ }
+ }
+ binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+ binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+ }
+}
+
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+ // ignore?
+ if wholeTag == "-" {
+ return []string{}
+ }
+ // rename?
+ var fieldNames []string
+ if tagProvidedFieldName == "" {
+ fieldNames = []string{originalFieldName}
+ } else {
+ fieldNames = []string{tagProvidedFieldName}
+ }
+ // private?
+ isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
+ if isNotExported {
+ fieldNames = []string{}
+ }
+ return fieldNames
+}
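
The Extension interface and the Register* helpers above are the customization hooks: global maps keyed by a type's String() form, consulted before the reflection-based encoders/decoders are built. A hedged sketch of registering a custom encoder for time.Time via RegisterTypeEncoderFunc (illustrative only; the date-only format is my own choice):

    package main

    import (
        "fmt"
        "time"
        "unsafe"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        // The key is the type's String() representation; the functions receive an
        // unsafe.Pointer to the value, exactly as funcEncoder above expects.
        jsoniter.RegisterTypeEncoderFunc("time.Time",
            func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
                t := *(*time.Time)(ptr)
                stream.WriteString(t.Format("2006-01-02"))
            },
            func(ptr unsafe.Pointer) bool {
                return (*(*time.Time)(ptr)).IsZero()
            })

        out, err := jsoniter.Marshal(struct {
            Born time.Time `json:"born"`
        }{Born: time.Date(2020, 5, 1, 0, 0, 0, 0, time.UTC)})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"born":"2020-05-01"}
    }
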
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000..98d45c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "strconv"
+ "unsafe"
+)
+
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+func CastJsonNumber(val interface{}) (string, bool) {
+ switch typedVal := val.(type) {
+ case json.Number:
+ return string(typedVal), true
+ case Number:
+ return string(typedVal), true
+ }
+ return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*json.Number)(ptr)) = json.Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*json.Number)(ptr)) = ""
+ default:
+ *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*json.Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*Number)(ptr)) = Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*Number)(ptr)) = ""
+ default:
+ *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*Number)(ptr))) == 0
+}
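
Number above mirrors encoding/json's json.Number, and CastJsonNumber accepts either form. A short sketch (mine; assumes the UseNumber option on Config) showing why keeping the literal text matters for large integers:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        // With UseNumber, numbers decoded into interface{} keep their literal
        // text instead of being converted to float64 and losing precision.
        api := jsoniter.Config{UseNumber: true}.Froze()

        var v interface{}
        if err := api.Unmarshal([]byte(`18446744073709551615`), &v); err != nil {
            panic(err)
        }

        // CastJsonNumber recognises both json.Number and jsoniter's Number.
        if s, ok := jsoniter.CastJsonNumber(v); ok {
            fmt.Println("literal:", s) // literal: 18446744073709551615
        }
    }
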
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000..eba434f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,76 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*json.RawMessage)(ptr)) = nil
+ } else {
+ *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*json.RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+ }
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*RawMessage)(ptr)) = nil
+ } else {
+ *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
+ }
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*RawMessage)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+ }
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*RawMessage)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000..5829671
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,346 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+ elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+ return &mapDecoder{
+ mapType: mapType,
+ keyType: mapType.Key(),
+ elemType: mapType.Elem(),
+ keyDecoder: keyDecoder,
+ elemDecoder: elemDecoder,
+ }
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ if ctx.sortMapKeys {
+ return &sortKeysMapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+ }
+ return &mapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+}
+
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+
+ switch typ.Kind() {
+ case reflect.String:
+ return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+ default:
+ return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+
+ switch typ.Kind() {
+ case reflect.String:
+ return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+ default:
+ if typ.Kind() == reflect.Interface {
+ return &dynamicMapKeyEncoder{ctx, typ}
+ }
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+type mapDecoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyType reflect2.Type
+ elemType reflect2.Type
+ keyDecoder ValDecoder
+ elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ mapType := decoder.mapType
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ *(*unsafe.Pointer)(ptr) = nil
+ mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+ return
+ }
+ if mapType.UnsafeIsNil(ptr) {
+ mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+ }
+ if c != '{' {
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == '}' {
+ return
+ }
+ iter.unreadByte()
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+ }
+}
+
+type numericMapKeyDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.decoder.Decode(ptr, iter)
+ c = iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
+
+type numericMapKeyEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.encoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type dynamicMapKeyEncoder struct {
+ ctx *ctx
+ valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ for i := 0; iter.HasNext(); i++ {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ key, elem := iter.UnsafeNext()
+ encoder.keyEncoder.Encode(key, stream)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ stream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, stream)
+ }
+ stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type sortKeysMapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ mapIter := encoder.mapType.UnsafeIterate(ptr)
+ subStream := stream.cfg.BorrowStream(nil)
+ subStream.Attachment = stream.Attachment
+ subIter := stream.cfg.BorrowIterator(nil)
+ keyValues := encodedKeyValues{}
+ for mapIter.HasNext() {
+ key, elem := mapIter.UnsafeNext()
+ subStreamIndex := subStream.Buffered()
+ encoder.keyEncoder.Encode(key, subStream)
+ if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ encodedKey := subStream.Buffer()[subStreamIndex:]
+ subIter.ResetBytes(encodedKey)
+ decodedKey := subIter.ReadString()
+ if stream.indention > 0 {
+ subStream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ subStream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, subStream)
+ keyValues = append(keyValues, encodedKV{
+ key: decodedKey,
+ keyValue: subStream.Buffer()[subStreamIndex:],
+ })
+ }
+ sort.Sort(keyValues)
+ for i, keyValue := range keyValues {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ stream.Write(keyValue.keyValue)
+ }
+ if subStream.Error != nil && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ stream.WriteObjectEnd()
+ stream.cfg.ReturnStream(subStream)
+ stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+ key string
+ keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
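
Whether mapEncoder or sortKeysMapEncoder above is chosen depends on the SortMapKeys setting of the frozen configuration. A brief sketch (mine) contrasting two presets that differ on this flag:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    func main() {
        m := map[string]int{"b": 2, "a": 1, "c": 3}

        // ConfigCompatibleWithStandardLibrary sets SortMapKeys, so the
        // sortKeysMapEncoder is used and key order is deterministic.
        sorted, _ := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(m)
        fmt.Println(string(sorted)) // {"a":1,"b":2,"c":3}

        // ConfigFastest leaves SortMapKeys unset, so the plain mapEncoder is
        // used and key order follows Go's randomized map iteration.
        unsorted, _ := jsoniter.ConfigFastest.Marshal(m)
        fmt.Println(string(unsorted)) // order may vary between runs
    }
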
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000..3e21f37
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,225 @@
+package jsoniter
+
+import (
+ "encoding"
+ "encoding/json"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{ptrType},
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{ptrType},
+ }
+ }
+ return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == marshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ if typ.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: typ,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ ptrType := reflect2.PtrTo(typ)
+ if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: ptrType,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ if typ == textMarshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directTextMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ return encoder
+ }
+ if typ.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ // if prefix is empty, the type is the root type
+ if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: ptrType,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ return nil
+}
+
+type marshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+ valType reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := obj.(json.Marshaler)
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ // HTML escaping was already done by jsoniter,
+ // but the extra '\n' should be trimmed
+ l := len(bytes)
+ if l > 0 && bytes[l-1] == '\n' {
+ bytes = bytes[:l-1]
+ }
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*json.Marshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+ valType reflect2.Type
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := (obj).(encoding.TextMarshaler)
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*encoding.TextMarshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ unmarshaler := obj.(json.Unmarshaler)
+ iter.nextToken()
+ iter.unreadByte() // skip spaces
+ bytes := iter.SkipAndReturnBytes()
+ err := unmarshaler.UnmarshalJSON(bytes)
+ if err != nil {
+ iter.ReportError("unmarshalerDecoder", err.Error())
+ }
+}
+
+type textUnmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ ptrType := valType.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elem := elemType.UnsafeNew()
+ ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+ obj = valType.UnsafeIndirect(ptr)
+ }
+ unmarshaler := (obj).(encoding.TextUnmarshaler)
+ str := iter.ReadString()
+ err := unmarshaler.UnmarshalText([]byte(str))
+ if err != nil {
+ iter.ReportError("textUnmarshalerDecoder", err.Error())
+ }
+}
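
Types implementing json.Marshaler or encoding.TextMarshaler are routed to the marshalerEncoder/textMarshalerEncoder variants above (with a trailing '\n' from MarshalJSON trimmed). A minimal sketch (mine) of a value type whose MarshalJSON output is embedded verbatim:

    package main

    import (
        "fmt"

        jsoniter "github.com/json-iterator/go"
    )

    // money implements json.Marshaler, so marshalerEncoder above handles it and
    // writes the returned bytes directly into the output.
    type money int64 // amount in cents

    func (m money) MarshalJSON() ([]byte, error) {
        return []byte(fmt.Sprintf(`"%d.%02d"`, m/100, m%100)), nil
    }

    func main() {
        out, err := jsoniter.Marshal(struct {
            Price money `json:"price"`
        }{Price: 1299})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"price":"12.99"}
    }
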
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000..f88722d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,453 @@
+package jsoniter
+
+import (
+ "encoding/base64"
+ "reflect"
+ "strconv"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ kind := typ.Kind()
+ switch kind {
+ case reflect.String:
+ if typeName != "string" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ switch typ.Kind() {
+ case reflect.String:
+ if typeName != "string" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int8)(ptr)) = iter.ReadInt8()
+ }
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int16)(ptr)) = iter.ReadInt16()
+ }
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int32)(ptr)) = iter.ReadInt32()
+ }
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int64)(ptr)) = iter.ReadInt64()
+ }
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint8)(ptr)) = iter.ReadUint8()
+ }
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint16)(ptr)) = iter.ReadUint16()
+ }
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint32)(ptr)) = iter.ReadUint32()
+ }
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint64)(ptr)) = iter.ReadUint64()
+ }
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float32)(ptr)) = iter.ReadFloat32()
+ }
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float64)(ptr)) = iter.ReadFloat64()
+ }
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*bool)(ptr)) = iter.ReadBool()
+ }
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return !(*((*bool)(ptr)))
+}
+
+type base64Codec struct {
+ sliceType *reflect2.UnsafeSliceType
+ sliceDecoder ValDecoder
+}
+
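+// Decode accepts either a base64-encoded JSON string or a plain JSON array of
+// byte values; a JSON null resets the slice to nil.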
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ codec.sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ switch iter.WhatIsNext() {
+ case StringValue:
+ src := iter.ReadString()
+ dst, err := base64.StdEncoding.DecodeString(src)
+ if err != nil {
+ iter.ReportError("decode base64", err.Error())
+ } else {
+ codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+ }
+ case ArrayValue:
+ codec.sliceDecoder.Decode(ptr, iter)
+ default:
+ iter.ReportError("base64Codec", "invalid input")
+ }
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if codec.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ src := *((*[]byte)(ptr))
+ encoding := base64.StdEncoding
+ stream.writeByte('"')
+ if len(src) != 0 {
+ size := encoding.EncodedLen(len(src))
+ buf := make([]byte, size)
+ encoding.Encode(buf, src)
+ stream.buf = append(stream.buf, buf...)
+ }
+ stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*[]byte)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000..fa71f47
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,129 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ decoder := decoderOfType(ctx, elemType)
+ return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elemEncoder := encoderOfType(ctx, elemType)
+ encoder := &OptionalEncoder{elemEncoder}
+ return encoder
+}
+
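+// OptionalDecoder decodes a JSON value into a pointer: null sets the pointer to
+// nil; otherwise the pointed-to value is allocated if needed and decoded in place.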
+type OptionalDecoder struct {
+ ValueType reflect2.Type
+ ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*unsafe.Pointer)(ptr)) = nil
+ } else {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // the pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.ValueType.UnsafeNew()
+ decoder.ValueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ // reuse the existing instance
+ decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+ }
+}
+
+type dereferenceDecoder struct {
+ // only to dereference a pointer
+ valueType reflect2.Type
+ valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // the pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.valueType.UnsafeNew()
+ decoder.valueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ // reuse the existing instance
+ decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+}
+
+type OptionalEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ dePtr := *((*unsafe.Pointer)(ptr))
+ if dePtr == nil {
+ return true
+ }
+ return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ deReferenced := *((*unsafe.Pointer)(ptr))
+ if deReferenced == nil {
+ return true
+ }
+ isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := unsafe.Pointer(deReferenced)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
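+// referenceEncoder and referenceDecoder wrap another codec and hand it the
+// address of the pointer they receive, adding one extra level of indirection.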
+type referenceEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000..9441d79
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceEncoder{sliceType, encoder}
+}
+
+type sliceEncoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if encoder.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ length := encoder.sliceType.UnsafeLengthOf(ptr)
+ if length == 0 {
+ stream.WriteEmptyArray()
+ return
+ }
+ stream.WriteArrayStart()
+ encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+ for i := 1; i < length; i++ {
+ stream.WriteMore()
+ elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+ }
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+ }
+}
+
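+// doDecode maps null to a nil slice and [] to an empty non-nil slice; otherwise
+// it grows the slice one element at a time, decoding each element in place.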
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ sliceType := decoder.sliceType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+ return
+ }
+ iter.unreadByte()
+ sliceType.UnsafeGrow(ptr, 1)
+ elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ idx := length
+ length += 1
+ sliceType.UnsafeGrow(ptr, length)
+ elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000..92ae912
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1097 @@
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+ bindings := map[string]*Binding{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, fromName := range binding.FromNames {
+ old := bindings[fromName]
+ if old == nil {
+ bindings[fromName] = binding
+ continue
+ }
+ ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+ if ignoreOld {
+ delete(bindings, fromName)
+ }
+ if !ignoreNew {
+ bindings[fromName] = binding
+ }
+ }
+ }
+ fields := map[string]*structFieldDecoder{}
+ for k, binding := range bindings {
+ fields[k] = binding.Decoder.(*structFieldDecoder)
+ }
+
+ if !ctx.caseSensitive() {
+ for k, binding := range bindings {
+ if _, found := fields[strings.ToLower(k)]; !found {
+ fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+ }
+ }
+ }
+
+ return createStructDecoder(ctx, typ, fields)
+}
+
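+// createStructDecoder returns a decoder specialized for structs with up to ten
+// fields, dispatching on a hash of each field name. If two field names collide
+// (or a hash equals the reserved value 0), it falls back to generalStructDecoder.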
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+ if ctx.disallowUnknownFields {
+ return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+ }
+ knownHash := map[int64]struct{}{
+ 0: {},
+ }
+
+ switch len(fields) {
+ case 0:
+ return &skipObjectDecoder{typ}
+ case 1:
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+ }
+ case 2:
+ var fieldHash1 int64
+ var fieldHash2 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldHash1 == 0 {
+ fieldHash1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else {
+ fieldHash2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ }
+ }
+ return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+ case 3:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ }
+ }
+ return &threeFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3}
+ case 4:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ }
+ }
+ return &fourFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4}
+ case 5:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ }
+ }
+ return &fiveFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5}
+ case 6:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ }
+ }
+ return &sixFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6}
+ case 7:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ }
+ }
+ return &sevenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7}
+ case 8:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ }
+ }
+ return &eightFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8}
+ case 9:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ }
+ }
+ return &nineFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9}
+ case 10:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldName10 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ var fieldDecoder10 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else if fieldName9 == 0 {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ } else {
+ fieldName10 = fieldHash
+ fieldDecoder10 = fieldDecoder
+ }
+ }
+ return &tenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9,
+ fieldName10, fieldDecoder10}
+ }
+ return &generalStructDecoder{typ, fields, false}
+}
+
+type generalStructDecoder struct {
+ typ reflect2.Type
+ fields map[string]*structFieldDecoder
+ disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ var c byte
+ for c = ','; c == ','; c = iter.nextToken() {
+ decoder.decodeOneField(ptr, iter)
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ if c != '}' {
+ iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+ }
+ iter.decrementDepth()
+}
+
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+ var field string
+ var fieldDecoder *structFieldDecoder
+ if iter.cfg.objectFieldMustBeSimpleString {
+ fieldBytes := iter.ReadStringAsSlice()
+ field = *(*string)(unsafe.Pointer(&fieldBytes))
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ } else {
+ field = iter.ReadString()
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ }
+ if fieldDecoder == nil {
+ if decoder.disallowUnknownFields {
+ msg := "found unknown field: " + field
+ iter.ReportError("ReadObject", msg)
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ iter.Skip()
+ return
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ fieldDecoder.Decode(ptr, iter)
+}
+
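+// skipObjectDecoder is used when the struct has no decodable fields: it only
+// verifies that the value is an object or null and skips it.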
+type skipObjectDecoder struct {
+ typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valueType := iter.WhatIsNext()
+ if valueType != ObjectValue && valueType != NilValue {
+ iter.ReportError("skipObjectDecoder", "expect object or null")
+ return
+ }
+ iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+ typ reflect2.Type
+ fieldHash int64
+ fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ if iter.readFieldHash() == decoder.fieldHash {
+ decoder.fieldDecoder.Decode(ptr, iter)
+ } else {
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type twoFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type threeFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fourFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fiveFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sixFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sevenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type eightFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type nineFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type tenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+ fieldHash10 int64
+ fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ case decoder.fieldHash10:
+ decoder.fieldDecoder10.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type structFieldDecoder struct {
+ field reflect2.StructField
+ fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ fieldPtr := decoder.field.UnsafeGet(ptr)
+ decoder.fieldDecoder.Decode(fieldPtr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+ }
+}
+
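+// stringModeStringDecoder and stringModeNumberDecoder handle values wrapped in a
+// JSON string, as produced by the ",string" struct tag option.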
+type stringModeStringDecoder struct {
+ elemDecoder ValDecoder
+ cfg *frozenConfig
+}
+
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.elemDecoder.Decode(ptr, iter)
+ str := *((*string)(ptr))
+ tempIter := decoder.cfg.BorrowIterator([]byte(str))
+ defer decoder.cfg.ReturnIterator(tempIter)
+ *((*string)(ptr)) = tempIter.ReadString()
+}
+
+type stringModeNumberDecoder struct {
+ elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() == NilValue {
+ decoder.elemDecoder.Decode(ptr, iter)
+ return
+ }
+
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.elemDecoder.Decode(ptr, iter)
+ if iter.Error != nil {
+ return
+ }
+ c = iter.readByte()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000..152e3ef
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+ type bindingTo struct {
+ binding *Binding
+ toName string
+ ignored bool
+ }
+ orderedBindings := []*bindingTo{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, toName := range binding.ToNames {
+ new := &bindingTo{
+ binding: binding,
+ toName: toName,
+ }
+ for _, old := range orderedBindings {
+ if old.toName != toName {
+ continue
+ }
+ old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+ }
+ orderedBindings = append(orderedBindings, new)
+ }
+ }
+ if len(orderedBindings) == 0 {
+ return &emptyStructEncoder{}
+ }
+ finalOrderedFields := []structFieldTo{}
+ for _, bindingTo := range orderedBindings {
+ if !bindingTo.ignored {
+ finalOrderedFields = append(finalOrderedFields, structFieldTo{
+ encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+ toName: bindingTo.toName,
+ })
+ }
+ }
+ return &structEncoder{typ, finalOrderedFields}
+}
+
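+// createCheckIsEmpty builds an encoder that is used only for its IsEmpty method
+// (for example, when deciding whether to omit a field), which is why several
+// branches construct encoders with zero-value element fields.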
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+ encoder := createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return &structEncoder{typ: typ}
+ case reflect.Array:
+ return &arrayEncoder{}
+ case reflect.Slice:
+ return &sliceEncoder{}
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return &OptionalEncoder{}
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+ }
+}
+
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+ newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+ oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+ if newTagged {
+ if oldTagged {
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ } else {
+ return true, false
+ }
+ } else {
+ if oldTagged {
+ return true, false
+ }
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ }
+}
+
+type structFieldEncoder struct {
+ field reflect2.StructField
+ fieldEncoder ValEncoder
+ omitempty bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ encoder.fieldEncoder.Encode(fieldPtr, stream)
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+ }
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+ IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+ typ reflect2.Type
+ fields []structFieldTo
+}
+
+type structFieldTo struct {
+ encoder *structFieldEncoder
+ toName string
+}
+
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteObjectStart()
+ isNotFirst := false
+ for _, field := range encoder.fields {
+ if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+ continue
+ }
+ if field.encoder.IsEmbeddedPtrNil(ptr) {
+ continue
+ }
+ if isNotFirst {
+ stream.WriteMore()
+ }
+ stream.WriteObjectField(field.toName)
+ field.encoder.Encode(ptr, stream)
+ isNotFirst = true
+ }
+ stream.WriteObjectEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+ }
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type stringModeNumberEncoder struct {
+ elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.elemEncoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+ elemEncoder ValEncoder
+ cfg *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ tempStream := encoder.cfg.BorrowStream(nil)
+ tempStream.Attachment = stream.Attachment
+ defer encoder.cfg.ReturnStream(tempStream)
+ encoder.elemEncoder.Encode(ptr, tempStream)
+ stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000..23d8a3a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values; they are stored in the Error field of the stream instance.
+type Stream struct {
+ cfg *frozenConfig
+ out io.Writer
+ buf []byte
+ Error error
+ indention int
+ Attachment interface{} // available to customized encoders
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write to the internal buffer only.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+ return &Stream{
+ cfg: cfg.(*frozenConfig),
+ out: out,
+ buf: make([]byte, 0, bufSize),
+ Error: nil,
+ indention: 0,
+ }
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+ return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+ stream.out = out
+ stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+ return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+ return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+ return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+ stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+ stream.buf = append(stream.buf, p...)
+ if stream.out != nil {
+ nn, err = stream.out.Write(stream.buf)
+ stream.buf = stream.buf[nn:]
+ return
+ }
+ return len(p), nil
+}
+
+// writeByte writes a single byte.
+func (stream *Stream) writeByte(c byte) {
+ stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+ stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+ if stream.out == nil {
+ return nil
+ }
+ if stream.Error != nil {
+ return stream.Error
+ }
+ _, err := stream.out.Write(stream.buf)
+ if err != nil {
+ if stream.Error == nil {
+ stream.Error = err
+ }
+ return err
+ }
+ stream.buf = stream.buf[:0]
+ return nil
+}
+
+// WriteRaw writes the string out without quotes, as raw bytes.
+func (stream *Stream) WriteRaw(s string) {
+ stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+ stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+ stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+ stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+ if val {
+ stream.WriteTrue()
+ } else {
+ stream.WriteFalse()
+ }
+}
+
+// WriteObjectStart writes { with possible indentation.
+func (stream *Stream) WriteObjectStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('{')
+ stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indentation.
+func (stream *Stream) WriteObjectField(field string) {
+ stream.WriteString(field)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(':', ' ')
+ } else {
+ stream.writeByte(':')
+ }
+}
+
+// WriteObjectEnd writes } with possible indentation.
+func (stream *Stream) WriteObjectEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+ stream.writeByte('{')
+ stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indentation.
+func (stream *Stream) WriteMore() {
+ stream.writeByte(',')
+ stream.writeIndention(0)
+}
+
+// WriteArrayStart writes [ with possible indentation.
+func (stream *Stream) WriteArrayStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('[')
+ stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+ stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indentation.
+func (stream *Stream) WriteArrayEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+ if stream.indention == 0 {
+ return
+ }
+ stream.writeByte('\n')
+ toWrite := stream.indention - delta
+ for i := 0; i < toWrite; i++ {
+ stream.buf = append(stream.buf, ' ')
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000..826aa59
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
+package jsoniter
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+ pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes a float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(float64(val))
+ fmt := byte('f')
+ // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+ if abs != 0 {
+ if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes a float32 to the stream with only 6 digits of precision; lossy, but much faster.
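+// It scales the value by 1e6, rounds, writes the integer and fractional parts
+// separately, and trims trailing zeros from the fraction.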
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat32(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 10^precision
+ lval := uint64(float64(val)*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
+
+// WriteFloat64 writes a float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(val)
+ fmt := byte('f')
+ // Switch to exponent format for very small or very large magnitudes.
+ if abs != 0 {
+ if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes a float64 to the stream with only 6 digits of precision; lossy, but much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat64(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 10^precision
+ lval := uint64(val*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000..d1059ee
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
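+// digits caches, for every value 0..999, its three ASCII digit bytes packed into
+// the low 24 bits; the top byte records how many leading digits to skip when the
+// value is the most significant group (2 below 10, 1 below 100).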
+func init() {
+ digits = make([]uint32, 1000)
+ for i := uint32(0); i < 1000; i++ {
+ digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+ if i < 10 {
+ digits[i] += 2 << 24
+ } else if i < 100 {
+ digits[i] += 1 << 24
+ }
+ }
+}
+
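+// writeFirstBuf writes the most significant group of up to three digits, dropping
+// leading zeros as recorded in the top byte; writeBuf always writes a full
+// three-digit group.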
+func writeFirstBuf(space []byte, v uint32) []byte {
+ start := v >> 24
+ if start == 0 {
+ space = append(space, byte(v>>16), byte(v>>8))
+ } else if start == 1 {
+ space = append(space, byte(v>>8))
+ }
+ space = append(space, byte(v))
+ return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+ return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
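
Each digits[] entry packs one base-1000 group: the low three bytes hold the ASCII hundreds/tens/units characters, and the top byte records how many of those leading characters to drop when the group is the most significant one (writeFirstBuf honors that count; writeBuf always emits all three). A standalone sketch decoding two entries, with a hypothetical pack helper copied from the init above:

```go
package main

import "fmt"

// pack replicates the digits[] encoding from the init function above.
func pack(i uint32) uint32 {
	v := (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
	if i < 10 {
		v += 2 << 24 // single-digit value: skip two leading '0's
	} else if i < 100 {
		v += 1 << 24 // two-digit value: skip one leading '0'
	}
	return v
}

func main() {
	v := pack(7)
	fmt.Printf("skip=%d digits=%c%c%c\n", v>>24, byte(v>>16), byte(v>>8), byte(v))
	// skip=2 digits=007

	v = pack(345)
	fmt.Printf("skip=%d digits=%c%c%c\n", v>>24, byte(v>>16), byte(v>>8), byte(v))
	// skip=0 digits=345
}
```
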
+
+// WriteUint8 writes a uint8 to the stream
+func (stream *Stream) WriteUint8(val uint8) {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes an int8 to the stream
+func (stream *Stream) WriteInt8(nval int8) {
+ var val uint8
+ if nval < 0 {
+ val = uint8(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint8(nval)
+ }
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes a uint16 to the stream
+func (stream *Stream) WriteUint16(val uint16) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+}
+
+// WriteInt16 writes an int16 to the stream
+func (stream *Stream) WriteInt16(nval int16) {
+ var val uint16
+ if nval < 0 {
+ val = uint16(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint16(nval)
+ }
+ stream.WriteUint16(val)
+}
+
+// WriteUint32 writes a uint32 to the stream
+func (stream *Stream) WriteUint32(val uint32) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ } else {
+ r3 := q2 - q3*1000
+ stream.buf = append(stream.buf, byte(q3+'0'))
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes an int32 to the stream
+func (stream *Stream) WriteInt32(nval int32) {
+ var val uint32
+ if nval < 0 {
+ val = uint32(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint32(nval)
+ }
+ stream.WriteUint32(val)
+}
+
+// WriteUint64 writes a uint64 to the stream
+func (stream *Stream) WriteUint64(val uint64) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r3 := q2 - q3*1000
+ q4 := q3 / 1000
+ if q4 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q3])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r4 := q3 - q4*1000
+ q5 := q4 / 1000
+ if q5 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q4])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r5 := q4 - q5*1000
+ q6 := q5 / 1000
+ if q6 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q5])
+ } else {
+ stream.buf = writeFirstBuf(stream.buf, digits[q6])
+ r6 := q5 - q6*1000
+ stream.buf = writeBuf(stream.buf, digits[r6])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r5])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
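
The unrolled ladder above is equivalent to splitting the value into base-1000 groups, printing the most significant group without leading zeros, and then printing every later group as exactly three characters. A loop-based sketch of the same idea (assuming it sits in this package so it can reuse digits, writeFirstBuf, and writeBuf; the vendored code unrolls the loop to avoid the extra array and iteration overhead):

```go
// appendUint64 is a loop-based equivalent of the unrolled WriteUint64 above.
func appendUint64(buf []byte, val uint64) []byte {
	var groups [7]uint64 // 20 decimal digits fit in 7 base-1000 groups
	n := 0
	for {
		groups[n] = val % 1000
		val /= 1000
		n++
		if val == 0 {
			break
		}
	}
	buf = writeFirstBuf(buf, digits[groups[n-1]]) // leading group, no leading zeros
	for i := n - 2; i >= 0; i-- {
		buf = writeBuf(buf, digits[groups[i]]) // every later group is 3 characters
	}
	return buf
}
```
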
+
+// WriteInt64 writes an int64 to the stream
+func (stream *Stream) WriteInt64(nval int64) {
+ var val uint64
+ if nval < 0 {
+ val = uint64(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint64(nval)
+ }
+ stream.WriteUint64(val)
+}
+
+// WriteInt writes an int to the stream
+func (stream *Stream) WriteInt(val int) {
+ stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes a uint to the stream
+func (stream *Stream) WriteUint(val uint) {
+ stream.WriteUint64(uint64(val))
+}
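
A quick cross-check of the table-driven integer writers against strconv through the public API, again assuming this vendored copy matches upstream json-iterator/go:

```go
package main

import (
	"fmt"
	"math/rand"
	"strconv"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	for i := 0; i < 5; i++ {
		v := rand.Uint64()

		s := jsoniter.ConfigDefault.BorrowStream(nil)
		s.WriteUint64(v)
		got := string(s.Buffer())
		jsoniter.ConfigDefault.ReturnStream(s)

		want := strconv.FormatUint(v, 10)
		fmt.Println(v, got == want) // expected to print true for every value
	}
}
```
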
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000..54c2ba0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+ "unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML