diff --git a/.travis.yml b/.travis.yml index ef7e2d891..4e720ca6a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,7 +6,7 @@ os: go: - 1.7.4 - - 1.6.3 + - 1.6.4 - 1.5.4 - tip diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index bf318a433..fd4f7acb9 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -16,30 +16,20 @@ "Comment": "v1.0-rc1-10-g33e0aa1", "Rev": "33e0aa1cb7c019ccc3fbe049a8262a6403d30504" }, - { - "ImportPath": "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec", - "Comment": "v2.3.7", - "Rev": "fd17c9101d94703f6f4c3d8d6cfb72b62b894cd7" - }, - { - "ImportPath": "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context", - "Comment": "v2.3.7", - "Rev": "fd17c9101d94703f6f4c3d8d6cfb72b62b894cd7" - }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v2.3.7", - "Rev": "fd17c9101d94703f6f4c3d8d6cfb72b62b894cd7" + "Comment": "v3.0.15", + "Rev": "fc00305a2e59b4c2d4a53c9fbb4d30741a96ea67" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v2.3.7", - "Rev": "fd17c9101d94703f6f4c3d8d6cfb72b62b894cd7" + "Comment": "v3.0.15", + "Rev": "fc00305a2e59b4c2d4a53c9fbb4d30741a96ea67" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v2.3.7", - "Rev": "fd17c9101d94703f6f4c3d8d6cfb72b62b894cd7" + "Comment": "v3.0.15", + "Rev": "fc00305a2e59b4c2d4a53c9fbb4d30741a96ea67" }, { "ImportPath": "github.com/docopt/docopt-go", @@ -48,33 +38,33 @@ }, { "ImportPath": "github.com/garyburd/redigo/internal", - "Comment": "v1.0.0-1-gb8dc900", - "Rev": "b8dc90050f24c1a73a52f107f3f575be67b21b7c" + "Comment": "v1.0.0-16-g8c052d8", + "Rev": "8c052d82153967815d9093a7e9d3e7fd2e554861" }, { "ImportPath": "github.com/garyburd/redigo/redis", - "Comment": "v1.0.0-1-gb8dc900", - "Rev": "b8dc90050f24c1a73a52f107f3f575be67b21b7c" + "Comment": "v1.0.0-16-g8c052d8", + "Rev": "8c052d82153967815d9093a7e9d3e7fd2e554861" }, { "ImportPath": "github.com/go-martini/martini", - "Comment": "v1.0-185-gc257c41", - "Rev": "c257c412d547ac70fcaf5596c1a50a7cb832c1fc" + "Comment": "v1.0-186-gfe605b5", + "Rev": "fe605b5cd210047ae3bb73d2b69a5b912a9b423d" }, { "ImportPath": "github.com/influxdata/influxdb/client/v2", - "Comment": "v0.13.0", - "Rev": "e57fb88a051ee40fd9277094345fbd47bb4783ce" + "Comment": "v1.1.0-223-g8c2cfd14", + "Rev": "8c2cfd14af2511580277ddfd7a834b6aaa5f19ab" }, { "ImportPath": "github.com/influxdata/influxdb/models", - "Comment": "v0.13.0", - "Rev": "e57fb88a051ee40fd9277094345fbd47bb4783ce" + "Comment": "v1.1.0-223-g8c2cfd14", + "Rev": "8c2cfd14af2511580277ddfd7a834b6aaa5f19ab" }, { "ImportPath": "github.com/influxdata/influxdb/pkg/escape", - "Comment": "v0.13.0", - "Rev": "e57fb88a051ee40fd9277094345fbd47bb4783ce" + "Comment": "v1.1.0-223-g8c2cfd14", + "Rev": "8c2cfd14af2511580277ddfd7a834b6aaa5f19ab" }, { "ImportPath": "github.com/martini-contrib/binding", @@ -94,15 +84,19 @@ }, { "ImportPath": "github.com/samuel/go-zookeeper/zk", - "Rev": "e64db453f3512cade908163702045e0f31137843" + "Rev": "1d7be4effb13d2d908342d349d71a284a7542693" }, { "ImportPath": "github.com/spinlock/jemalloc-go", "Rev": "c02d8364a3e58416d9de7be36db64ee0494eceb7" }, + { + "ImportPath": "github.com/ugorji/go/codec", + "Rev": "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74" + }, { "ImportPath": "golang.org/x/net/context", - "Rev": "4d38db76854b199960801a1734443fd02870d7e1" + "Rev": "60c41d1de8da134c05b7b40154a9a82bf5b7edb9" } ] } diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go 
b/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go deleted file mode 100644 index 11bd8d34e..000000000 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context/context.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out <-chan Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. 
Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. 
Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, &c) - return &c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) cancelCtx { - return cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return &c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md index e9e4be468..0bab9589c 100644 --- a/vendor/github.com/coreos/etcd/client/README.md +++ b/vendor/github.com/coreos/etcd/client/README.md @@ -4,6 +4,13 @@ etcd/client is the Go client library for etcd. [![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) +etcd uses `cmd/vendor` directory to store external dependencies, which are +to be compiled into etcd release binaries. `client` can be imported without +vendoring. For full compatibility, it is recommended to vendor builds using +etcd's vendored packages, using tools like godep, as in +[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). +For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). + ## Install ```bash @@ -19,7 +26,7 @@ import ( "log" "time" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "golang.org/x/net/context" "github.com/coreos/etcd/client" ) diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go index 06378fb39..d15e00dd7 100644 --- a/vendor/github.com/coreos/etcd/client/auth_role.go +++ b/vendor/github.com/coreos/etcd/client/auth_role.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ import ( "net/http" "net/url" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "golang.org/x/net/context" ) type Role struct { diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go index e075c4fd9..97c3f3181 100644 --- a/vendor/github.com/coreos/etcd/client/auth_user.go +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. 
+// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( "net/url" "path" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "golang.org/x/net/context" ) var ( @@ -47,10 +47,6 @@ type UserRoles struct { Roles []Role `json:"roles"` } -type userName struct { - User string `json:"user"` -} - func v2AuthURL(ep url.URL, action string, name string) *url.URL { if name != "" { ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go index fefdb40e4..76d1f0401 100644 --- a/vendor/github.com/coreos/etcd/client/cancelreq.go +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -4,8 +4,6 @@ // borrowed from golang/net/context/ctxhttp/cancelreq.go -// +build go1.5 - package client import "net/http" diff --git a/vendor/github.com/coreos/etcd/client/cancelreq_go14.go b/vendor/github.com/coreos/etcd/client/cancelreq_go14.go deleted file mode 100644 index 2bed38a41..000000000 --- a/vendor/github.com/coreos/etcd/client/cancelreq_go14.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq_go14.go - -// +build !go1.5 - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - return func() { - tr.CancelRequest(req) - } -} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index b0581a955..eeeb8b57a 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
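
Reviewer note: the hunks above and below drop the old `Godeps/_workspace` import path in favor of plain `golang.org/x/net/context`, matching the re-vendored etcd/client at v3.0.15. As a hedged sketch of how a consumer of this package looks after the bump (the endpoint address and key name are placeholders, not anything configured in this repository; the call pattern follows the client README included earlier in this diff):

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		// Placeholder endpoint; point this at a real etcd member.
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Set the key "/foo" to "bar"; nil means default SetOptions.
	if _, err := kapi.Set(context.Background(), "/foo", "bar", nil); err != nil {
		log.Fatal(err)
	}

	// Read it back.
	resp, err := kapi.Get(context.Background(), "/foo", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%q has value %q", resp.Node.Key, resp.Node.Value)
}
```
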
@@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "golang.org/x/net/context" ) var ( @@ -37,6 +37,10 @@ var ( ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} ) var DefaultRequestTimeout = 5 * time.Second @@ -335,6 +339,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo var body []byte var err error cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil for i := pinned; i < leps+pinned; i++ { k := i % leps @@ -348,6 +353,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } + if isOneShot { + return nil, nil, err + } continue } if resp.StatusCode/100 == 5 { @@ -358,6 +366,9 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo default: cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } + if isOneShot { + return nil, nil, cerr.Errors[0] + } continue } if k != pinned { diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go index 957ed4624..aef5bf755 100644 --- a/vendor/github.com/coreos/etcd/client/cluster_error.go +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go index 5a5a69a94..c8bc9fba2 100644 --- a/vendor/github.com/coreos/etcd/client/curl.go +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go index ae88659f4..bfd7aec93 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go index 70111cace..32fdfb52c 100644 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -34,6 +34,8 @@ Create a Config and exchange it for a Client: // handle error } +Clients are safe for concurrent use by multiple goroutines. 
+ Create a KeysAPI using the Client, then use it to interact with etcd: kAPI := client.NewKeysAPI(c) diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go index feac0d1d8..748283aa9 100644 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -8,7 +8,7 @@ package client import ( "errors" "fmt" - codec1978 "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec" + codec1978 "github.com/ugorji/go/codec" "reflect" "runtime" time "time" diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go index 7e07f722c..62d5d506e 100644 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -26,9 +26,9 @@ import ( "strings" "time" - "github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" + "golang.org/x/net/context" ) const ( @@ -337,7 +337,11 @@ func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions act.Dir = opts.Dir } - resp, body, err := k.client.Do(ctx, act) + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) if err != nil { return nil, err } @@ -385,7 +389,8 @@ func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOption act.Recursive = opts.Recursive } - resp, body, err := k.client.Do(ctx, act) + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go index 71b01b27f..23adf07ad 100644 --- a/vendor/github.com/coreos/etcd/client/members.go +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ import ( "net/url" "path" - "github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context" + "golang.org/x/net/context" "github.com/coreos/etcd/pkg/types" ) diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go index 06197967c..fdfa34359 100644 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ b/vendor/github.com/coreos/etcd/client/srv.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go index fc0800b3d..198bff965 100644 --- a/vendor/github.com/coreos/etcd/client/util.go +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. 
+// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go index 04b4c38d1..de8ef0bd7 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/doc.go +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go index 88cb9e634..1b042d9ce 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go index bb997174c..73ef431be 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go index 0327950f7..0dd9ca798 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/slice.go +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go index ce2483ffa..9e5d03ff6 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/urls.go +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -36,8 +36,8 @@ func NewURLs(strs []string) (URLs, error) { if err != nil { return nil, err } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("URL scheme must be http or https: %s", in) + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) } if _, _, err := net.SplitHostPort(u.Host); err != nil { return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) @@ -53,6 +53,14 @@ func NewURLs(strs []string) (URLs, error) { return us, nil } +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + func (us URLs) String() string { return strings.Join(us.StringSlice(), ",") } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go index 4fe9218c7..47690cc38 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -40,6 +40,20 @@ func NewURLsMap(s string) (URLsMap, error) { return cl, nil } +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + // String turns URLsMap into discovery-formatted name-to-URLs sorted by name. func (c URLsMap) String() string { var pairs []string diff --git a/vendor/github.com/garyburd/redigo/redis/conn.go b/vendor/github.com/garyburd/redigo/redis/conn.go index ed358c601..84c3b9c35 100644 --- a/vendor/github.com/garyburd/redigo/redis/conn.go +++ b/vendor/github.com/garyburd/redigo/redis/conn.go @@ -17,6 +17,7 @@ package redis import ( "bufio" "bytes" + "crypto/tls" "errors" "fmt" "io" @@ -75,6 +76,9 @@ type dialOptions struct { dial func(network, addr string) (net.Conn, error) db int password string + dialTLS bool + skipVerify bool + tlsConfig *tls.Config } // DialReadTimeout specifies the timeout for reading a single command reply. @@ -123,6 +127,22 @@ func DialPassword(password string) DialOption { }} } +// DialTLSConfig specifies the config to use when a TLS connection is dialed. +// Has no effect when not dialing a TLS connection. +func DialTLSConfig(c *tls.Config) DialOption { + return DialOption{func(do *dialOptions) { + do.tlsConfig = c + }} +} + +// DialTLSSkipVerify to disable server name verification when connecting +// over TLS. Has no effect when not dialing a TLS connection. +func DialTLSSkipVerify(skip bool) DialOption { + return DialOption{func(do *dialOptions) { + do.skipVerify = skip + }} +} + // Dial connects to the Redis server at the given network and // address using the specified options. 
func Dial(network, address string, options ...DialOption) (Conn, error) { @@ -137,6 +157,26 @@ func Dial(network, address string, options ...DialOption) (Conn, error) { if err != nil { return nil, err } + + if do.dialTLS { + tlsConfig := cloneTLSClientConfig(do.tlsConfig, do.skipVerify) + if tlsConfig.ServerName == "" { + host, _, err := net.SplitHostPort(address) + if err != nil { + netConn.Close() + return nil, err + } + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(netConn, tlsConfig) + if err := tlsConn.Handshake(); err != nil { + netConn.Close() + return nil, err + } + netConn = tlsConn + } + c := &conn{ conn: netConn, bw: bufio.NewWriter(netConn), @@ -162,6 +202,10 @@ func Dial(network, address string, options ...DialOption) (Conn, error) { return c, nil } +func dialTLS(do *dialOptions) { + do.dialTLS = true +} + var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) // DialURL connects to a Redis server at the given URL using the Redis @@ -173,7 +217,7 @@ func DialURL(rawurl string, options ...DialOption) (Conn, error) { return nil, err } - if u.Scheme != "redis" { + if u.Scheme != "redis" && u.Scheme != "rediss" { return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) } @@ -213,6 +257,10 @@ func DialURL(rawurl string, options ...DialOption) (Conn, error) { return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) } + if u.Scheme == "rediss" { + options = append([]DialOption{{dialTLS}}, options...) + } + return Dial("tcp", address, options...) } diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go index f3495bae0..5f7fdd2a6 100644 --- a/vendor/github.com/garyburd/redigo/redis/doc.go +++ b/vendor/github.com/garyburd/redigo/redis/doc.go @@ -99,7 +99,7 @@ // // Concurrency // -// Connections support one concurrent caller to the Recieve method and one +// Connections support one concurrent caller to the Receive method and one // concurrent caller to the Send and Flush methods. No other concurrency is // supported including concurrent calls to the Do method. // @@ -165,4 +165,13 @@ // if _, err := redis.Scan(reply, &value1, &value2); err != nil { // // handle error // } +// +// Errors +// +// Connection methods return error replies from the server as type redis.Error. +// +// Call the connection Err() method to determine if the connection encountered +// non-recoverable error such as a network error or protocol parsing error. If +// Err() returns a non-nil value, then the connection is not usable and should +// be closed. 
package redis diff --git a/vendor/github.com/garyburd/redigo/redis/go17.go b/vendor/github.com/garyburd/redigo/redis/go17.go new file mode 100644 index 000000000..3f951e5ef --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/go17.go @@ -0,0 +1,33 @@ +// +build go1.7 + +package redis + +import "crypto/tls" + +// similar cloneTLSClientConfig in the stdlib, but also honor skipVerify for the nil case +func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config { + if cfg == nil { + return &tls.Config{InsecureSkipVerify: skipVerify} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/github.com/garyburd/redigo/redis/pool.go b/vendor/github.com/garyburd/redigo/redis/pool.go index d66ef84b6..283a41d5a 100644 --- a/vendor/github.com/garyburd/redigo/redis/pool.go +++ b/vendor/github.com/garyburd/redigo/redis/pool.go @@ -46,40 +46,26 @@ var ( // // The following example shows how to use a pool in a web application. The // application creates a pool at application startup and makes it available to -// request handlers using a global variable. +// request handlers using a package level variable. The pool configuration used +// here is an example, not a recommendation. // -// func newPool(server, password string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// return c, err -// }, -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// _, err := c.Do("PING") -// return err -// }, -// } +// func newPool(addr string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, +// } // } // // var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// redisPassword = flag.String("redisPassword", "", "") +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") // ) // // func main() { -// flag.Parse() -// pool = newPool(*redisServer, *redisPassword) -// ... +// flag.Parse() +// pool = newPool(*redisServer) +// ... // } // // A request handler gets a connection from the pool and closes the connection @@ -88,7 +74,44 @@ var ( // func serveHome(w http.ResponseWriter, r *http.Request) { // conn := pool.Get() // defer conn.Close() -// .... +// ... +// } +// +// Use the Dial function to authenticate connections with the AUTH command or +// select a database with the SELECT command: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. 
+// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// if _, err := c.Do("SELECT", db); err != nil { +// c.Close() +// return nil, err +// } +// return c, nil +// } +// } +// +// Use the TestOnBorrow function to check the health of an idle connection +// before the connection is returned to the application. This example PINGs +// connections that have been idle more than a minute: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// if time.Since(t) < time.Minute { +// return nil +// } +// _, err := c.Do("PING") +// return err +// }, // } // type Pool struct { diff --git a/vendor/github.com/garyburd/redigo/redis/pre_go17.go b/vendor/github.com/garyburd/redigo/redis/pre_go17.go new file mode 100644 index 000000000..0212f60fb --- /dev/null +++ b/vendor/github.com/garyburd/redigo/redis/pre_go17.go @@ -0,0 +1,31 @@ +// +build !go1.7 + +package redis + +import "crypto/tls" + +// similar cloneTLSClientConfig in the stdlib, but also honor skipVerify for the nil case +func cloneTLSClientConfig(cfg *tls.Config, skipVerify bool) *tls.Config { + if cfg == nil { + return &tls.Config{InsecureSkipVerify: skipVerify} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/garyburd/redigo/redis/redis.go b/vendor/github.com/garyburd/redigo/redis/redis.go index c90a48ed4..b7298298c 100644 --- a/vendor/github.com/garyburd/redigo/redis/redis.go +++ b/vendor/github.com/garyburd/redigo/redis/redis.go @@ -24,10 +24,7 @@ type Conn interface { // Close closes the connection. Close() error - // Err returns a non-nil value if the connection is broken. The returned - // value is either the first non-nil value returned from the underlying - // network connection or a protocol parsing error. Applications should - // close broken connections. + // Err returns a non-nil value when the connection is not usable. Err() error // Do sends a command to the server and returns the received reply. 
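
Reviewer note: the redigo changes above add TLS dialing (`DialTLSConfig`, `DialTLSSkipVerify`) and teach `DialURL` the `rediss://` scheme. A minimal sketch of connecting over TLS with the updated API, assuming a hypothetical TLS-enabled Redis endpoint (the hostname and port are placeholders):

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/garyburd/redigo/redis"
)

func main() {
	// The "rediss" scheme tells DialURL to wrap the connection in TLS.
	// DialTLSSkipVerify(true) could be passed instead to skip server name checks.
	c, err := redis.DialURL("rediss://redis.example.com:6380",
		redis.DialTLSConfig(&tls.Config{MinVersion: tls.VersionTLS12}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// A PING round-trip confirms both the TLS handshake and the protocol work.
	if _, err := c.Do("PING"); err != nil {
		log.Fatal(err)
	}
}
```
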
diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md index f0794abc1..12280798a 100644 --- a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -1,27 +1,25 @@ # List - bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) - collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) -- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE) - github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) - github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) - github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) - github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) - github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) -- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) - github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE) -- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE) -- github.com/hashicorp/raft-boltdb [MOZILLA PUBLIC LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE) - github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) - github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) -- github.com/kimor79/gollectd [BSD LICENSE](https://github.com/kimor79/gollectd/blob/master/LICENSE) - github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) - github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) - github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) +- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) +- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) - glyphicons [LICENSE](http://glyphicons.com/license/) - golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) -- golang.org/x/tools [BSD LICENSE](https://github.com/golang/tools/blob/master/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT LICENSE](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) - react 0.13.3 [BSD LICENSE](https://github.com/facebook/react/blob/master/LICENSE) diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go 
b/vendor/github.com/influxdata/influxdb/client/v2/client.go index 0bcf77c36..66c210d95 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/client.go +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -1,3 +1,4 @@ +// Package client (v2) is the current official Go client for InfluxDB. package client import ( @@ -7,7 +8,6 @@ import ( "errors" "fmt" "io/ioutil" - "net" "net/http" "net/url" "time" @@ -15,32 +15,26 @@ import ( "github.com/influxdata/influxdb/models" ) -// UDPPayloadSize is a reasonable default payload size for UDP packets that -// could be travelling over the internet. -const ( - UDPPayloadSize = 512 -) - -// HTTPConfig is the config data needed to create an HTTP Client +// HTTPConfig is the config data needed to create an HTTP Client. type HTTPConfig struct { // Addr should be of the form "http://host:port" // or "http://[ipv6-host%zone]:port". Addr string - // Username is the influxdb username, optional + // Username is the influxdb username, optional. Username string - // Password is the influxdb password, optional + // Password is the influxdb password, optional. Password string - // UserAgent is the http User Agent, defaults to "InfluxDBClient" + // UserAgent is the http User Agent, defaults to "InfluxDBClient". UserAgent string - // Timeout for influxdb writes, defaults to no timeout + // Timeout for influxdb writes, defaults to no timeout. Timeout time.Duration // InsecureSkipVerify gets passed to the http client, if true, it will - // skip https certificate verification. Defaults to false + // skip https certificate verification. Defaults to false. InsecureSkipVerify bool // TLSConfig allows the user to set their own TLS config for the HTTP @@ -48,35 +42,25 @@ type HTTPConfig struct { TLSConfig *tls.Config } -// UDPConfig is the config data needed to create a UDP Client -type UDPConfig struct { - // Addr should be of the form "host:port" - // or "[ipv6-host%zone]:port". - Addr string - - // PayloadSize is the maximum size of a UDP client message, optional - // Tune this based on your network. Defaults to UDPBufferSize. - PayloadSize int -} - -// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. type BatchPointsConfig struct { - // Precision is the write precision of the points, defaults to "ns" + // Precision is the write precision of the points, defaults to "ns". Precision string - // Database is the database to write points to + // Database is the database to write points to. Database string - // RetentionPolicy is the retention policy of the points + // RetentionPolicy is the retention policy of the points. RetentionPolicy string - // Write consistency is the number of servers required to confirm write + // Write consistency is the number of servers required to confirm write. WriteConsistency string } -// Client is a client interface for writing & querying the database +// Client is a client interface for writing & querying the database. type Client interface { - // Ping checks that status of cluster + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. Ping(timeout time.Duration) (time.Duration, string, error) // Write takes a BatchPoints object and writes all Points to InfluxDB. @@ -177,42 +161,6 @@ func (c *client) Close() error { return nil } -// NewUDPClient returns a client interface for writing to an InfluxDB UDP -// service from the given config. 
-func NewUDPClient(conf UDPConfig) (Client, error) { - var udpAddr *net.UDPAddr - udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) - if err != nil { - return nil, err - } - - conn, err := net.DialUDP("udp", nil, udpAddr) - if err != nil { - return nil, err - } - - payloadSize := conf.PayloadSize - if payloadSize == 0 { - payloadSize = UDPPayloadSize - } - - return &udpclient{ - conn: conn, - payloadSize: payloadSize, - }, nil -} - -// Ping will check to see if the server is up with an optional timeout on waiting for leader. -// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. -func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { - return 0, "", nil -} - -// Close releases the udpclient's resources. -func (uc *udpclient) Close() error { - return uc.conn.Close() -} - // client is safe for concurrent use as the fields are all read-only // once the client is instantiated. type client struct { @@ -226,40 +174,35 @@ type client struct { transport *http.Transport } -type udpclient struct { - conn *net.UDPConn - payloadSize int -} - // BatchPoints is an interface into a batched grouping of points to write into // InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate // batch for each goroutine. type BatchPoints interface { - // AddPoint adds the given point to the Batch of points + // AddPoint adds the given point to the Batch of points. AddPoint(p *Point) - // AddPoints adds the given points to the Batch of points + // AddPoints adds the given points to the Batch of points. AddPoints(ps []*Point) - // Points lists the points in the Batch + // Points lists the points in the Batch. Points() []*Point - // Precision returns the currently set precision of this Batch + // Precision returns the currently set precision of this Batch. Precision() string // SetPrecision sets the precision of this batch. SetPrecision(s string) error - // Database returns the currently set database of this Batch + // Database returns the currently set database of this Batch. Database() string - // SetDatabase sets the database of this Batch + // SetDatabase sets the database of this Batch. SetDatabase(s string) - // WriteConsistency returns the currently set write consistency of this Batch + // WriteConsistency returns the currently set write consistency of this Batch. WriteConsistency() string - // SetWriteConsistency sets the write consistency of this Batch + // SetWriteConsistency sets the write consistency of this Batch. SetWriteConsistency(s string) - // RetentionPolicy returns the currently set retention policy of this Batch + // RetentionPolicy returns the currently set retention policy of this Batch. RetentionPolicy() string - // SetRetentionPolicy sets the retention policy of this Batch + // SetRetentionPolicy sets the retention policy of this Batch. SetRetentionPolicy(s string) } @@ -336,7 +279,7 @@ func (bp *batchpoints) SetRetentionPolicy(rp string) { bp.retentionPolicy = rp } -// Point represents a single data point +// Point represents a single data point. type Point struct { pt models.Point } @@ -356,7 +299,7 @@ func NewPoint( T = t[0] } - pt, err := models.NewPoint(name, tags, fields, T) + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) if err != nil { return nil, err } @@ -365,38 +308,39 @@ func NewPoint( }, nil } -// String returns a line-protocol string of the Point +// String returns a line-protocol string of the Point. 
func (p *Point) String() string { return p.pt.String() } -// PrecisionString returns a line-protocol string of the Point, at precision +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. func (p *Point) PrecisionString(precison string) string { return p.pt.PrecisionString(precison) } -// Name returns the measurement name of the point +// Name returns the measurement name of the point. func (p *Point) Name() string { return p.pt.Name() } -// Tags returns the tags associated with the point +// Tags returns the tags associated with the point. func (p *Point) Tags() map[string]string { - return p.pt.Tags() + return p.pt.Tags().Map() } -// Time return the timestamp for the point +// Time return the timestamp for the point. func (p *Point) Time() time.Time { return p.pt.Time() } -// UnixNano returns the unix nano time of the point +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. func (p *Point) UnixNano() int64 { return p.pt.UnixNano() } -// Fields returns the fields for the point -func (p *Point) Fields() map[string]interface{} { +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { return p.pt.Fields() } @@ -405,31 +349,6 @@ func NewPointFrom(pt models.Point) *Point { return &Point{pt: pt} } -func (uc *udpclient) Write(bp BatchPoints) error { - var b bytes.Buffer - var d time.Duration - d, _ = time.ParseDuration("1" + bp.Precision()) - - for _, p := range bp.Points() { - pointstring := p.pt.RoundedString(d) + "\n" - - // Write and reset the buffer if we reach the max size - if b.Len()+len(pointstring) >= uc.payloadSize { - if _, err := uc.conn.Write(b.Bytes()); err != nil { - return err - } - b.Reset() - } - - if _, err := b.WriteString(pointstring); err != nil { - return err - } - } - - _, err := uc.conn.Write(b.Bytes()) - return err -} - func (c *client) Write(bp BatchPoints) error { var b bytes.Buffer @@ -481,21 +400,34 @@ func (c *client) Write(bp BatchPoints) error { return nil } -// Query defines a query to send to the server +// Query defines a query to send to the server. type Query struct { - Command string - Database string - Precision string + Command string + Database string + Precision string + Parameters map[string]interface{} } -// NewQuery returns a query object -// database and precision strings can be empty strings if they are not needed -// for the query. +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. func NewQuery(command, database, precision string) Query { return Query{ - Command: command, - Database: database, - Precision: precision, + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, } } @@ -506,7 +438,7 @@ type Response struct { } // Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. +// It returns nil if no errors occurred on any statements. 
func (r *Response) Error() error { if r.Err != "" { return fmt.Errorf(r.Err) @@ -532,21 +464,25 @@ type Result struct { Err string `json:"error,omitempty"` } -func (uc *udpclient) Query(q Query) (*Response, error) { - return nil, fmt.Errorf("Querying via UDP is not supported") -} - -// Query sends a command to the server and returns the Response +// Query sends a command to the server and returns the Response. func (c *client) Query(q Query) (*Response, error) { u := c.url u.Path = "query" + jsonParameters, err := json.Marshal(q.Parameters) + + if err != nil { + return nil, err + } + req, err := http.NewRequest("POST", u.String(), nil) if err != nil { return nil, err } + req.Header.Set("Content-Type", "") req.Header.Set("User-Agent", c.useragent) + if c.username != "" { req.SetBasicAuth(c.username, c.password) } @@ -554,6 +490,8 @@ func (c *client) Query(q Query) (*Response, error) { params := req.URL.Query() params.Set("q", q.Command) params.Set("db", q.Database) + params.Set("params", string(jsonParameters)) + if q.Precision != "" { params.Set("epoch", q.Precision) } diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 000000000..779a28b33 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. 
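Reviewer note: the UDP path moved into the new udp.go shown here. A brief usage sketch, reusing a batch built as in the earlier HTTP example; the address is a placeholder, and writes over UDP are fire-and-forget (Query and Ping are stubs, see below).

udpClient, err := client.NewUDPClient(client.UDPConfig{
	Addr:        "localhost:8089",
	PayloadSize: 0, // zero falls back to UDPPayloadSize (512 bytes)
})
if err != nil {
	log.Fatal(err)
}
defer udpClient.Close()

if err := udpClient.Write(bp); err != nil {
	log.Fatal(err)
}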
+func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go index 97cdc51aa..2a3269bca 100644 --- a/vendor/github.com/influxdata/influxdb/models/consistency.go +++ b/vendor/github.com/influxdata/influxdb/models/consistency.go @@ -6,20 +6,22 @@ import ( ) // ConsistencyLevel represent a required replication criteria before a write can -// be returned as successful +// be returned as successful. +// +// The consistency level is handled in open-source InfluxDB but only applicable to clusters. type ConsistencyLevel int const ( - // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet + // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. ConsistencyLevelAny ConsistencyLevel = iota - // ConsistencyLevelOne requires at least one data node acknowledged a write + // ConsistencyLevelOne requires at least one data node acknowledged a write. ConsistencyLevelOne - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. ConsistencyLevelQuorum - // ConsistencyLevelAll requires all data nodes to acknowledge a write + // ConsistencyLevelAll requires all data nodes to acknowledge a write. ConsistencyLevelAll ) @@ -29,7 +31,7 @@ var ( ErrInvalidConsistencyLevel = errors.New("invalid consistency level") ) -// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. 
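Reviewer note: the Write implementation above packs serialized points into buffers capped at payloadSize, flushing before a point would overflow and splitting points that are individually too large. A standalone sketch of the same packing idea (without the point-splitting step), independent of the client types:

// packLines groups line-protocol strings into payloads that stay within
// maxSize, flushing the buffer whenever the next line would overflow it.
// A line longer than maxSize ends up being sent on its own.
func packLines(lines []string, maxSize int, send func([]byte) error) error {
	buf := make([]byte, 0, maxSize)
	for _, line := range lines {
		if len(buf) > 0 && len(buf)+len(line)+1 > maxSize {
			if err := send(buf); err != nil {
				return err
			}
			buf = buf[:0]
		}
		buf = append(buf, line...)
		buf = append(buf, '\n')
	}
	if len(buf) > 0 {
		return send(buf)
	}
	return nil
}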
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { switch strings.ToLower(level) { case "any": diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go new file mode 100644 index 000000000..1d8ae2982 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go new file mode 100644 index 000000000..727ce3580 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -0,0 +1,38 @@ +package models + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. +func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. +func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go index d83fe24d9..f415e0182 100644 --- a/vendor/github.com/influxdata/influxdb/models/points.go +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -1,3 +1,4 @@ +// Package models implements basic objects used throughout the TICK stack. package models import ( @@ -5,7 +6,6 @@ import ( "encoding/binary" "errors" "fmt" - "hash/fnv" "math" "sort" "strconv" @@ -27,61 +27,155 @@ var ( '=': []byte(`\=`), } - ErrPointMustHaveAField = errors.New("point without fields is unsupported") - ErrInvalidNumber = errors.New("invalid number") - ErrMaxKeyLengthExceeded = errors.New("max key length exceeded") + // ErrPointMustHaveAField is returned when operating on a point that does not have any fields. 
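Reviewer note: inline_fnv.go above reimplements 64-bit FNV-1a so that hashing a key does not allocate a hash.Hash64. A small sketch checking that it matches the standard library; only the key bytes are placeholders.

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/influxdata/influxdb/models"
)

func main() {
	key := []byte("cpu,host=server01,region=us-west")

	// Standard library FNV-1a (allocates a hash.Hash64).
	std := fnv.New64a()
	std.Write(key)

	// Alloc-free port from the models package.
	inline := models.NewInlineFNV64a()
	inline.Write(key)

	fmt.Println(std.Sum64() == inline.Sum64()) // expected: true
}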
+ ErrPointMustHaveAField = errors.New("point without fields is unsupported") + + // ErrInvalidNumber is returned when a number is expected but not provided. + ErrInvalidNumber = errors.New("invalid number") + + // ErrInvalidPoint is returned when a point cannot be parsed correctly. + ErrInvalidPoint = errors.New("point is invalid") ) const ( + // MaxKeyLength is the largest allowed size of the combined measurement and tag keys. MaxKeyLength = 65535 ) -// Point defines the values that will be written to the database +// Point defines the values that will be written to the database. type Point interface { + // Name return the measurement name for the point. Name() string + + // SetName updates the measurement name for the point. SetName(string) + // Tags returns the tag set for the point. Tags() Tags + + // AddTag adds or replaces a tag value for a point. AddTag(key, value string) + + // SetTags replaces the tags for the point. SetTags(tags Tags) - Fields() Fields + // Fields returns the fields for the point. + Fields() (Fields, error) + // Time return the timestamp for the point. Time() time.Time + + // SetTime updates the timestamp for the point. SetTime(t time.Time) + + // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. UnixNano() int64 + // HashID returns a non-cryptographic checksum of the point's key. HashID() uint64 - Key() []byte - Data() []byte - SetData(buf []byte) + // Key returns the key (measurement joined with tags) of the point. + Key() []byte - // String returns a string representation of the point, if there is a + // String returns a string representation of the point. If there is a // timestamp associated with the point then it will be specified with the default - // precision of nanoseconds + // precision of nanoseconds. String() string - // Bytes returns a []byte representation of the point similar to string. + // MarshalBinary returns a binary representation of the point. MarshalBinary() ([]byte, error) - // PrecisionString returns a string representation of the point, if there + // PrecisionString returns a string representation of the point. If there // is a timestamp associated with the point then it will be specified in the - // given unit + // given unit. PrecisionString(precision string) string - // RoundedString returns a string representation of the point, if there + // RoundedString returns a string representation of the point. If there // is a timestamp associated with the point, then it will be rounded to the - // given duration + // given duration. RoundedString(d time.Duration) string + + // Split will attempt to return multiple points with the same timestamp whose + // string representations are no longer than size. Points with a single field or + // a point without a timestamp may exceed the requested size. + Split(size int) []Point + + // Round will round the timestamp of the point to the given duration. + Round(d time.Duration) + + // StringSize returns the length of the string that would be returned by String(). + StringSize() int + + // AppendString appends the result of String() to the provided buffer and returns + // the result, potentially reducing string allocations. + AppendString(buf []byte) []byte + + // FieldIterator retuns a FieldIterator that can be used to traverse the + // fields of a point without constructing the in-memory map. + FieldIterator() FieldIterator +} + +// FieldType represents the type of a field. +type FieldType int + +const ( + // Integer indicates the field's type is integer. 
+ Integer FieldType = iota + + // Float indicates the field's type is float. + Float + + // Boolean indicates the field's type is boolean. + Boolean + + // String indicates the field's type is string. + String + + // Empty is used to indicate that there is no field. + Empty +) + +// FieldIterator provides a low-allocation interface to iterate through a point's fields. +type FieldIterator interface { + // Next indicates whether there any fields remaining. + Next() bool + + // FieldKey returns the key of the current field. + FieldKey() []byte + + // Type returns the FieldType of the current field. + Type() FieldType + + // StringValue returns the string value of the current field. + StringValue() string + + // IntegerValue returns the integer value of the current field. + IntegerValue() (int64, error) + + // BooleanValue returns the boolean value of the current field. + BooleanValue() (bool, error) + + // FloatValue returns the float value of the current field. + FloatValue() (float64, error) + + // Delete deletes the current field. + Delete() + + // Reset resets the iterator to its initial state. + Reset() } // Points represents a sortable list of points by timestamp. type Points []Point -func (a Points) Len() int { return len(a) } +// Len implements sort.Interface. +func (a Points) Len() int { return len(a) } + +// Less implements sort.Interface. func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } -func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Swap implements sort.Interface. +func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // point is the default implementation of Point. type point struct { @@ -98,14 +192,16 @@ type point struct { // text encoding of timestamp ts []byte - // binary encoded field data - data []byte - // cached version of parsed fields from data cachedFields map[string]interface{} // cached version of parsed name from key cachedName string + + // cached version of parsed tags + cachedTags Tags + + it fieldIterator } const ( @@ -131,21 +227,20 @@ func ParsePoints(buf []byte) ([]Point, error) { return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") } -// ParsePointsString is identical to ParsePoints but accepts a string -// buffer. +// ParsePointsString is identical to ParsePoints but accepts a string. func ParsePointsString(buf string) ([]Point, error) { return ParsePoints([]byte(buf)) } // ParseKey returns the measurement name and tags from a point. -func ParseKey(buf string) (string, Tags, error) { +func ParseKey(buf []byte) (string, Tags, error) { // Ignore the error because scanMeasurement returns "missing fields" which we ignore // when just parsing a key - state, i, _ := scanMeasurement([]byte(buf), 0) + state, i, _ := scanMeasurement(buf, 0) var tags Tags if state == tagKeyState { - tags = parseTags([]byte(buf)) + tags = parseTags(buf) // scanMeasurement returns the location of the comma if there are tags, strip that off return string(buf[:i-1]), tags, nil } @@ -155,18 +250,18 @@ func ParseKey(buf string) (string, Tags, error) { // ParsePointsWithPrecision is similar to ParsePoints, but allows the // caller to provide a precision for time. 
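Reviewer note: FieldIterator (declared above) lets callers walk a point's fields without building the Fields map. A hedged fragment, assuming the models import path; the line-protocol input is a placeholder.

pts, err := models.ParsePointsString(`cpu,host=server01 idle=90.5,user=3i,up=true,note="ok" 1434055562000000000`)
if err != nil {
	log.Fatal(err)
}

it := pts[0].FieldIterator()
for it.Next() {
	switch it.Type() {
	case models.Float:
		v, _ := it.FloatValue()
		log.Printf("%s = %v (float)", it.FieldKey(), v)
	case models.Integer:
		v, _ := it.IntegerValue()
		log.Printf("%s = %v (integer)", it.FieldKey(), v)
	case models.Boolean:
		v, _ := it.BooleanValue()
		log.Printf("%s = %v (boolean)", it.FieldKey(), v)
	case models.String:
		log.Printf("%s = %q (string)", it.FieldKey(), it.StringValue())
	}
}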
func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { - points := []Point{} + points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) var ( pos int block []byte failed []string ) - for { + for pos < len(buf) { pos, block = scanLine(buf, pos) pos++ if len(block) == 0 { - break + continue } // lines which start with '#' are comments @@ -186,17 +281,13 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin block = block[:len(block)-1] } - pt, err := parsePoint(block[start:len(block)], defaultTime, precision) + pt, err := parsePoint(block[start:], defaultTime, precision) if err != nil { failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err)) } else { points = append(points, pt) } - if pos >= len(buf) { - break - } - } if len(failed) > 0 { return points, fmt.Errorf("%s", strings.Join(failed, "\n")) @@ -234,7 +325,6 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err // scan the last block which is an optional integer timestamp pos, ts, err := scanTime(buf, pos) - if err != nil { return nil, err } @@ -249,7 +339,7 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err pt.time = defaultTime pt.SetPrecision(precision) } else { - ts, err := strconv.ParseInt(string(ts), 10, 64) + ts, err := parseIntBytes(ts, 10, 64) if err != nil { return nil, err } @@ -257,11 +347,20 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err if err != nil { return nil, err } + + // Determine if there are illegal non-whitespace characters after the + // timestamp block. + for pos < len(buf) { + if buf[pos] != ' ' { + return nil, ErrInvalidPoint + } + pos++ + } } return pt, nil } -// GetPrecisionMultiplier will return a multiplier for the precision specified +// GetPrecisionMultiplier will return a multiplier for the precision specified. func GetPrecisionMultiplier(precision string) int64 { d := time.Nanosecond switch precision { @@ -315,24 +414,24 @@ func scanKey(buf []byte, i int) (int, []byte, error) { } } - // Now we know where the key region is within buf, and the locations of tags, we - // need to determine if duplicate tags exist and if the tags are sorted. This iterates - // 1/2 of the list comparing each end with each other, walking towards the center from - // both sides. - for j := 0; j < commas/2; j++ { + // Now we know where the key region is within buf, and the location of tags, we + // need to determine if duplicate tags exist and if the tags are sorted. This iterates + // over the list comparing each tag in the sequence with each other. + for j := 0; j < commas-1; j++ { // get the left and right tags _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') - _, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=') - - // If the tags are equal, then there are duplicate tags, and we should abort - if bytes.Equal(left, right) { - return i, buf[start:i], fmt.Errorf("duplicate tags") - } - - // If left is greater than right, the tags are not sorted. We must continue - // since their could be duplicate tags still. - if bytes.Compare(left, right) > 0 { + _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') + + // If left is greater than right, the tags are not sorted. We do not have to + // continue because the short path no longer works. + // If the tags are equal, then there are duplicate tags, and we should abort. 
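Reviewer note: a short parsing sketch for the function above; comment lines are skipped, the "s" argument treats bare timestamps as seconds, and the reworked parser now rejects stray non-space bytes after the timestamp with ErrInvalidPoint. Input values are placeholders.

buf := []byte("weather,location=us-midwest temperature=82 1465839830\n" +
	"# lines starting with '#' are ignored\n" +
	"weather,location=us-east temperature=75 1465839830\n")

pts, err := models.ParsePointsWithPrecision(buf, time.Now().UTC(), "s")
if err != nil {
	log.Fatal(err)
}
for _, p := range pts {
	log.Println(p.String()) // timestamps print back in nanoseconds
}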
+ // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if cmp := bytes.Compare(left, right); cmp > 0 { sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") } } @@ -358,6 +457,20 @@ func scanKey(buf []byte, i int) (int, []byte, error) { pos += copy(b[pos:], v) } + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + return i, b, nil } @@ -378,7 +491,7 @@ func scanMeasurement(buf []byte, i int) (int, int, error) { // Check first byte of measurement, anything except a comma is fine. // It can't be a space, since whitespace is stripped prior to this // function call. - if buf[i] == ',' { + if i >= len(buf) || buf[i] == ',' { return -1, i, fmt.Errorf("missing measurement") } @@ -525,17 +638,8 @@ func less(buf []byte, indices []int, i, j int) bool { return bytes.Compare(a, b) < 0 } -func isFieldEscapeChar(b byte) bool { - for c := range escape.Codes { - if c == b { - return true - } - } - return false -} - // scanFields scans buf, starting at i for the fields section of a point. It returns -// the ending position and the byte slice of the fields within buf +// the ending position and the byte slice of the fields within buf. func scanFields(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start @@ -634,32 +738,34 @@ func scanFields(buf []byte, i int) (int, []byte, error) { return i, buf[start:i], nil } -// scanTime scans buf, starting at i for the time section of a point. It returns -// the ending position and the byte slice of the fields within buf and error if the -// timestamp is not in the correct numeric format +// scanTime scans buf, starting at i for the time section of a point. It +// returns the ending position and the byte slice of the timestamp within buf +// and and error if the timestamp is not in the correct numeric format. func scanTime(buf []byte, i int) (int, []byte, error) { start := skipWhitespace(buf, i) i = start + for { // reached the end of buf? if i >= len(buf) { break } - // Timestamps should be integers, make sure they are so we don't need to actually - // parse the timestamp until needed - if buf[i] < '0' || buf[i] > '9' { - // Handle negative timestamps - if i == start && buf[i] == '-' { - i++ - continue - } - return i, buf[start:i], fmt.Errorf("bad timestamp") + // Reached end of block or trailing whitespace? + if buf[i] == '\n' || buf[i] == ' ' { + break } - // reached end of block? - if buf[i] == '\n' { - break + // Handle negative timestamps + if i == start && buf[i] == '-' { + i++ + continue + } + + // Timestamps should be integers, make sure they are so we don't need + // to actually parse the timestamp until needed. 
+ if buf[i] < '0' || buf[i] > '9' { + return i, buf[start:i], fmt.Errorf("bad timestamp") } i++ } @@ -770,14 +876,14 @@ func scanNumber(buf []byte, i int) (int, error) { // Parse the int to check bounds the number of digits could be larger than the max range // We subtract 1 from the index to remove the `i` from our tests if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { - if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil { + if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) } } } else { // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { - if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil { + if _, err := parseFloatBytes(buf[start:i], 10); err != nil { return i, fmt.Errorf("invalid float") } } @@ -846,7 +952,7 @@ func scanBoolean(buf []byte, i int) (int, []byte, error) { } // skipWhitespace returns the end position within buf, starting at i after -// scanning over spaces in tags +// scanning over spaces in tags. func skipWhitespace(buf []byte, i int) int { for i < len(buf) { if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { @@ -973,19 +1079,18 @@ func scanTagValue(buf []byte, i int) (int, []byte) { } i++ } + if i > len(buf) { + return i, nil + } return i, buf[start:i] } func scanFieldValue(buf []byte, i int) (int, []byte) { start := i quoted := false - for { - if i >= len(buf) { - break - } - - // Only escape char for a field value is a double-quote - if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' { + for i < len(buf) { + // Only escape char for a field value is a double-quote and backslash + if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { i += 2 continue } @@ -1029,6 +1134,10 @@ func escapeTag(in []byte) []byte { } func unescapeTag(in []byte) []byte { + if bytes.IndexByte(in, '\\') == -1 { + return in + } + for b, esc := range tagEscapeCodes { if bytes.IndexByte(in, b) != -1 { in = bytes.Replace(in, esc, []byte{b}, -1) @@ -1037,38 +1146,21 @@ func unescapeTag(in []byte) []byte { return in } -// escapeStringField returns a copy of in with any double quotes or -// backslashes with escaped values -func escapeStringField(in string) string { - var out []byte - i := 0 - for { - if i >= len(in) { - break - } - // escape double-quotes - if in[i] == '\\' { - out = append(out, '\\') - out = append(out, '\\') - i++ - continue - } - // escape double-quotes - if in[i] == '"' { - out = append(out, '\\') - out = append(out, '"') - i++ - continue - } - out = append(out, in[i]) - i++ +// escapeStringFieldReplacer replaces double quotes and backslashes +// with the same character preceded by a backslash. +// As of Go 1.7 this benchmarked better in allocations and CPU time +// compared to iterating through a string byte-by-byte and appending to a new byte slice, +// calling strings.Replace twice, and better than (*Regex).ReplaceAllString. +var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) - } - return string(out) +// EscapeStringField returns a copy of in with any double quotes or +// backslashes with escaped values. 
+func EscapeStringField(in string) string { + return escapeStringFieldReplacer.Replace(in) } // unescapeStringField returns a copy of in with any escaped double-quotes -// or backslashes unescaped +// or backslashes unescaped. func unescapeStringField(in string) string { if strings.IndexByte(in, '\\') == -1 { return in @@ -1101,20 +1193,42 @@ func unescapeStringField(in string) string { // NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If // an unsupported field value (NaN) or out of range time is passed, this function returns an error. -func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) { +func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { + key, err := pointKey(name, tags, fields, t) + if err != nil { + return nil, err + } + + return &point{ + key: key, + time: t, + fields: fields.MarshalBinary(), + }, nil +} + +// pointKey checks some basic requirements for valid points, and returns the +// key, along with an possible error. +func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { if len(fields) == 0 { return nil, ErrPointMustHaveAField } - if !time.IsZero() { - if err := CheckTime(time); err != nil { + + if !t.IsZero() { + if err := CheckTime(t); err != nil { return nil, err } } for key, value := range fields { - if fv, ok := value.(float64); ok { + switch value := value.(type) { + case float64: // Ensure the caller validates and handles invalid field values - if math.IsNaN(fv) { + if math.IsNaN(value) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + case float32: + // Ensure the caller validates and handles invalid field values + if math.IsNaN(float64(value)) { return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) } } @@ -1123,16 +1237,12 @@ func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, err } } - key := MakeKey([]byte(name), tags) + key := MakeKey([]byte(measurement), tags) if len(key) > MaxKeyLength { return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) } - return &point{ - key: key, - time: time, - fields: fields.MarshalBinary(), - }, nil + return key, nil } // NewPointFromBytes returns a new Point from a marshalled Point. @@ -1141,7 +1251,11 @@ func NewPointFromBytes(b []byte) (Point, error) { if err := p.UnmarshalBinary(b); err != nil { return nil, err } - if len(p.Fields()) == 0 { + fields, err := p.Fields() + if err != nil { + return nil, err + } + if len(fields) == 0 { return nil, ErrPointMustHaveAField } return p, nil @@ -1157,14 +1271,7 @@ func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { return pt } -func (p *point) Data() []byte { - return p.data -} - -func (p *point) SetData(b []byte) { - p.data = b -} - +// Key returns the key (measurement joined with tags) of the point. func (p *point) Key() []byte { return p.key } @@ -1174,7 +1281,7 @@ func (p *point) name() []byte { return name } -// Name return the measurement name for the point +// Name return the measurement name for the point. func (p *point) Name() string { if p.cachedName != "" { return p.cachedName @@ -1183,56 +1290,73 @@ func (p *point) Name() string { return p.cachedName } -// SetName updates the measurement name for the point +// SetName updates the measurement name for the point. 
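Reviewer note: the old byte-by-byte escapeStringField is replaced by a prebuilt strings.Replacer. A standalone sketch of that approach for string field values; names here are illustrative, not the package's.

import "strings"

// fieldEscaper mirrors the replacer-based approach adopted above: one
// prebuilt strings.Replacer escapes both double quotes and backslashes
// in a single pass, with no per-call byte slices.
var fieldEscaper = strings.NewReplacer(`"`, `\"`, `\`, `\\`)

func escapeField(s string) string {
	return fieldEscaper.Replace(s) // `say "hi" \ bye` -> `say \"hi\" \\ bye`
}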
func (p *point) SetName(name string) { p.cachedName = "" p.key = MakeKey([]byte(name), p.Tags()) } -// Time return the timestamp for the point +// Time return the timestamp for the point. func (p *point) Time() time.Time { return p.time } -// SetTime updates the timestamp for the point +// SetTime updates the timestamp for the point. func (p *point) SetTime(t time.Time) { p.time = t } -// Tags returns the tag set for the point +// Round will round the timestamp of the point to the given duration. +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + +// Tags returns the tag set for the point. func (p *point) Tags() Tags { - return parseTags(p.key) + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key) + return p.cachedTags } func parseTags(buf []byte) Tags { - tags := map[string]string{} + if len(buf) == 0 { + return nil + } - if len(buf) != 0 { - pos, name := scanTo(buf, 0, ',') + pos, name := scanTo(buf, 0, ',') - // it's an empyt key, so there are no tags - if len(name) == 0 { - return tags - } + // it's an empty key, so there are no tags + if len(name) == 0 { + return nil + } - i := pos + 1 - var key, value []byte - for { - if i >= len(buf) { - break - } - i, key = scanTo(buf, i, '=') - i, value = scanTagValue(buf, i+1) + tags := make(Tags, 0, bytes.Count(buf, []byte(","))) + hasEscape := bytes.IndexByte(buf, '\\') != -1 - if len(value) == 0 { - continue - } + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) - tags[string(unescapeTag(key))] = string(unescapeTag(value)) + if len(value) == 0 { + continue + } - i++ + if hasEscape { + tags = append(tags, Tag{Key: unescapeTag(key), Value: unescapeTag(value)}) + } else { + tags = append(tags, Tag{Key: key, Value: value}) } + + i++ } + return tags } @@ -1243,28 +1367,35 @@ func MakeKey(name []byte, tags Tags) []byte { return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...) } -// SetTags replaces the tags for the point +// SetTags replaces the tags for the point. func (p *point) SetTags(tags Tags) { p.key = MakeKey([]byte(p.Name()), tags) + p.cachedTags = tags } -// AddTag adds or replaces a tag value for a point +// AddTag adds or replaces a tag value for a point. func (p *point) AddTag(key, value string) { tags := p.Tags() - tags[key] = value + tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) + sort.Sort(tags) + p.cachedTags = tags p.key = MakeKey([]byte(p.Name()), tags) } -// Fields returns the fields for the point -func (p *point) Fields() Fields { +// Fields returns the fields for the point. +func (p *point) Fields() (Fields, error) { if p.cachedFields != nil { - return p.cachedFields + return p.cachedFields, nil } - p.cachedFields = p.unmarshalBinary() - return p.cachedFields + cf, err := p.unmarshalBinary() + if err != nil { + return nil, err + } + p.cachedFields = cf + return p.cachedFields, nil } -// SetPrecision will round a time to the specified precision +// SetPrecision will round a time to the specified precision. func (p *point) SetPrecision(precision string) { switch precision { case "n": @@ -1281,6 +1412,7 @@ func (p *point) SetPrecision(precision string) { } } +// String returns the string representation of the point. 
func (p *point) String() string { if p.Time().IsZero() { return string(p.Key()) + " " + string(p.fields) @@ -1288,7 +1420,48 @@ func (p *point) String() string { return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) } +// AppendString appends the string representation of the point to buf. +func (p *point) AppendString(buf []byte) []byte { + buf = append(buf, p.key...) + buf = append(buf, ' ') + buf = append(buf, p.fields...) + + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). +func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + tb, err := p.time.MarshalBinary() if err != nil { return nil, err @@ -1311,6 +1484,7 @@ func (p *point) MarshalBinary() ([]byte, error) { return b, nil } +// UnmarshalBinary decodes a binary representation of the point into a point struct. func (p *point) UnmarshalBinary(b []byte) error { var i int keyLen := int(binary.BigEndian.Uint32(b[:4])) @@ -1330,6 +1504,9 @@ func (p *point) UnmarshalBinary(b []byte) error { return nil } +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. func (p *point) PrecisionString(precision string) string { if p.Time().IsZero() { return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) @@ -1338,6 +1515,9 @@ func (p *point) PrecisionString(precision string) string { p.UnixNano()/GetPrecisionMultiplier(precision)) } +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. 
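Reviewer note: StringSize and AppendString (added above) let callers serialize many points into one reused buffer instead of allocating a string per point; StringSize already accounts for the timestamp digits and separating spaces. A small sketch:

// serialize writes every point as line protocol into a single buffer,
// sizing it up front with StringSize to avoid repeated growth.
func serialize(points []models.Point) []byte {
	size := 0
	for _, p := range points {
		size += p.StringSize() + 1 // +1 for the trailing newline
	}

	buf := make([]byte, 0, size)
	for _, p := range points {
		buf = p.AppendString(buf)
		buf = append(buf, '\n')
	}
	return buf
}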
func (p *point) RoundedString(d time.Duration) string { if p.Time().IsZero() { return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) @@ -1346,65 +1526,230 @@ func (p *point) RoundedString(d time.Duration) string { p.time.Round(d).UnixNano()) } -func (p *point) unmarshalBinary() Fields { - return newFieldsFromBinary(p.fields) +func (p *point) unmarshalBinary() (Fields, error) { + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + v, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Integer: + v, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + v, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + } + } + return fields, nil } +// HashID returns a non-cryptographic checksum of the point's key. func (p *point) HashID() uint64 { - h := fnv.New64a() + h := NewInlineFNV64a() h.Write(p.key) sum := h.Sum64() return sum } +// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. func (p *point) UnixNano() int64 { return p.Time().UnixNano() } -// Tags represents a mapping between a Point's tag names and their -// values. -type Tags map[string]string +// Split will attempt to return multiple points with the same timestamp whose +// string representations are no longer than size. Points with a single field or +// a point without a timestamp may exceed the requested size. +func (p *point) Split(size int) []Point { + if p.time.IsZero() || len(p.String()) <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, Tag{Key: []byte(k), Value: []byte(v)}) + } + sort.Sort(a) + return a +} + +// Len implements sort.Interface. +func (a Tags) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } + +// Swap implements sort.Interface. +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. 
+ + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. +func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for _, t := range *a { + if bytes.Equal(t.Key, key) { + t.Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// Merge merges the tags combining the two. If both define a tag with the +// same key, the merged value overwrites the old value. +// A new map is returned. +func (a Tags) Merge(other map[string]string) Tags { + merged := make(map[string]string, len(a)+len(other)) + for _, t := range a { + merged[string(t.Key)] = string(t.Value) + } + for k, v := range other { + merged[k] = v + } + return NewTags(merged) +} // HashKey hashes all of a tag's keys. -func (t Tags) HashKey() []byte { +func (a Tags) HashKey() []byte { // Empty maps marshal to empty bytes. - if len(t) == 0 { + if len(a) == 0 { return nil } - escaped := Tags{} - for k, v := range t { - ek := escapeTag([]byte(k)) - ev := escapeTag([]byte(v)) + escaped := make(Tags, 0, len(a)) + for _, t := range a { + ek := escapeTag(t.Key) + ev := escapeTag(t.Value) if len(ev) > 0 { - escaped[string(ek)] = string(ev) + escaped = append(escaped, Tag{Key: ek, Value: ev}) } } // Extract keys and determine final size. sz := len(escaped) + (len(escaped) * 2) // separators - keys := make([]string, len(escaped)+1) - i := 0 - for k, v := range escaped { - keys[i] = k - i++ - sz += len(k) + len(v) + keys := make([][]byte, len(escaped)+1) + for i, t := range escaped { + keys[i] = t.Key + sz += len(t.Key) + len(t.Value) } - keys = keys[:i] - sort.Strings(keys) + keys = keys[:len(escaped)] + sort.Sort(byteSlices(keys)) + // Generate marshaled bytes. b := make([]byte, sz) buf := b idx := 0 - for _, k := range keys { + for i, k := range keys { buf[idx] = ',' idx++ copy(buf[idx:idx+len(k)], k) idx += len(k) buf[idx] = '=' idx++ - v := escaped[k] + v := escaped[i].Value copy(buf[idx:idx+len(v)], v) idx += len(v) } @@ -1418,159 +1763,235 @@ type Fields map[string]interface{} func parseNumber(val []byte) (interface{}, error) { if val[len(val)-1] == 'i' { val = val[:len(val)-1] - return strconv.ParseInt(string(val), 10, 64) + return parseIntBytes(val, 10, 64) } for i := 0; i < len(val); i++ { // If there is a decimal or an N (NaN), I (Inf), parse as float if val[i] == '.' 
|| val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' { - return strconv.ParseFloat(string(val), 64) + return parseFloatBytes(val, 64) } if val[i] < '0' && val[i] > '9' { return string(val), nil } } - return strconv.ParseFloat(string(val), 64) + return parseFloatBytes(val, 64) } -func newFieldsFromBinary(buf []byte) Fields { - fields := make(Fields, 8) - var ( - i int - name, valueBuf []byte - value interface{} - err error - ) - for i < len(buf) { +// FieldIterator retuns a FieldIterator that can be used to traverse the +// fields of a point without constructing the in-memory map. +func (p *point) FieldIterator() FieldIterator { + p.Reset() + return p +} - i, name = scanTo(buf, i, '=') - name = escape.Unescape(name) +type fieldIterator struct { + start, end int + key, keybuf []byte + valueBuf []byte + fieldType FieldType +} - i, valueBuf = scanFieldValue(buf, i+1) - if len(name) > 0 { - if len(valueBuf) == 0 { - fields[string(name)] = nil - continue - } +// Next indicates whether there any fields remaining. +func (p *point) Next() bool { + p.it.start = p.it.end + if p.it.start >= len(p.fields) { + return false + } - // If the first char is a double-quote, then unmarshal as string - if valueBuf[0] == '"' { - value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1])) - // Check for numeric characters and special NaN or Inf - } else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' || - valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN - valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf + p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') + if escape.IsEscaped(p.it.key) { + p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) + p.it.key = p.it.keybuf + } - value, err = parseNumber(valueBuf) - if err != nil { - panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err)) - } + p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) + p.it.end++ - // Otherwise parse it as bool - } else { - value, err = strconv.ParseBool(string(valueBuf)) - if err != nil { - panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err)) - } - } - fields[string(name)] = value + if len(p.it.valueBuf) == 0 { + p.it.fieldType = Empty + return true + } + + c := p.it.valueBuf[0] + + if c == '"' { + p.it.fieldType = String + return true + } + + if strings.IndexByte(`0123456789-.nNiI`, c) >= 0 { + if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { + p.it.fieldType = Integer + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] + } else { + p.it.fieldType = Float } - i++ + return true + } + + // to keep the same behavior that currently exists, default to boolean + p.it.fieldType = Boolean + return true +} + +// FieldKey returns the key of the current field. +func (p *point) FieldKey() []byte { + return p.it.key +} + +// Type returns the FieldType of the current field. +func (p *point) Type() FieldType { + return p.it.fieldType +} + +// StringValue returns the string value of the current field. +func (p *point) StringValue() string { + return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) +} + +// IntegerValue returns the integer value of the current field. +func (p *point) IntegerValue() (int64, error) { + n, err := parseIntBytes(p.it.valueBuf, 10, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err) } - return fields + return n, nil +} + +// BooleanValue returns the boolean value of the current field. 
+func (p *point) BooleanValue() (bool, error) { + b, err := parseBoolBytes(p.it.valueBuf) + if err != nil { + return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err) + } + return b, nil +} + +// FloatValue returns the float value of the current field. +func (p *point) FloatValue() (float64, error) { + f, err := parseFloatBytes(p.it.valueBuf, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err) + } + return f, nil +} + +// Delete deletes the current field. +func (p *point) Delete() { + switch { + case p.it.end == p.it.start: + case p.it.end >= len(p.fields): + p.fields = p.fields[:p.it.start] + case p.it.start == 0: + p.fields = p.fields[p.it.end:] + default: + p.fields = append(p.fields[:p.it.start], p.fields[p.it.end:]...) + } + + p.it.end = p.it.start + p.it.key = nil + p.it.valueBuf = nil + p.it.fieldType = Empty +} + +// Reset resets the iterator to its initial state. +func (p *point) Reset() { + p.it.fieldType = Empty + p.it.key = nil + p.it.valueBuf = nil + p.it.start = 0 + p.it.end = 0 } // MarshalBinary encodes all the fields to their proper type and returns the binary // represenation // NOTE: uint64 is specifically not supported due to potential overflow when we decode // again later to an int64 +// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... func (p Fields) MarshalBinary() []byte { - b := []byte{} - keys := make([]string, len(p)) - i := 0 + var b []byte + keys := make([]string, 0, len(p)) + for k := range p { - keys[i] = k - i++ + keys = append(keys, k) } + + // Not really necessary, can probably be removed. sort.Strings(keys) - for _, k := range keys { - v := p[k] - b = append(b, []byte(escape.String(k))...) - b = append(b, '=') - switch t := v.(type) { - case int: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int64: - b = append(b, []byte(strconv.FormatInt(t, 10))...) - b = append(b, 'i') - case uint: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case float32: - val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32)) - b = append(b, val...) - case float64: - val := []byte(strconv.FormatFloat(t, 'f', -1, 64)) - b = append(b, val...) - case bool: - b = append(b, []byte(strconv.FormatBool(t))...) - case []byte: - b = append(b, t...) - case string: - b = append(b, '"') - b = append(b, []byte(escapeStringField(t))...) - b = append(b, '"') - case nil: - // skip - default: - // Can't determine the type, so convert to string - b = append(b, '"') - b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...) 
- b = append(b, '"') - - } - b = append(b, ',') - } - if len(b) > 0 { - return b[0 : len(b)-1] + for i, k := range keys { + if i > 0 { + b = append(b, ',') + } + b = appendField(b, k, p[k]) } + return b } -type indexedSlice struct { - indices []int - b []byte -} +func appendField(b []byte, k string, v interface{}) []byte { + b = append(b, []byte(escape.String(k))...) + b = append(b, '=') + + // check popular types first + switch v := v.(type) { + case float64: + b = strconv.AppendFloat(b, v, 'f', -1, 64) + case int64: + b = strconv.AppendInt(b, v, 10) + b = append(b, 'i') + case string: + b = append(b, '"') + b = append(b, []byte(EscapeStringField(v))...) + b = append(b, '"') + case bool: + b = strconv.AppendBool(b, v) + case int32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + // TODO: 'uint' should be considered just as "dangerous" as a uint64, + // perhaps the value should be checked and capped at MaxInt64? We could + // then include uint64 as an accepted value + case uint: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case float32: + b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) + case []byte: + b = append(b, v...) + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') -func (s *indexedSlice) Less(i, j int) bool { - _, a := scanTo(s.b, s.indices[i], '=') - _, b := scanTo(s.b, s.indices[j], '=') - return bytes.Compare(a, b) < 0 -} + } -func (s *indexedSlice) Swap(i, j int) { - s.indices[i], s.indices[j] = s.indices[j], s.indices[i] + return b } -func (s *indexedSlice) Len() int { - return len(s.indices) -} +type byteSlices [][]byte + +func (a byteSlices) Len() int { return len(a) } +func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } +func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go index 72435f5c7..c087a4882 100644 --- a/vendor/github.com/influxdata/influxdb/models/rows.go +++ b/vendor/github.com/influxdata/influxdb/models/rows.go @@ -1,7 +1,6 @@ package models import ( - "hash/fnv" "sort" ) @@ -11,7 +10,7 @@ type Row struct { Tags map[string]string `json:"tags,omitempty"` Columns []string `json:"columns,omitempty"` Values [][]interface{} `json:"values,omitempty"` - Err error `json:"err,omitempty"` + Partial bool `json:"partial,omitempty"` } // SameSeries returns true if r contains values for the same series as o. @@ -21,7 +20,7 @@ func (r *Row) SameSeries(o *Row) bool { // tagsHash returns a hash of tag key/value pairs. func (r *Row) tagsHash() uint64 { - h := fnv.New64a() + h := NewInlineFNV64a() keys := r.tagsKeys() for _, k := range keys { h.Write([]byte(k)) @@ -43,8 +42,10 @@ func (r *Row) tagsKeys() []string { // Rows represents a collection of rows. Rows implements sort.Interface. type Rows []*Row +// Len implements sort.Interface. 
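Reviewer note: circling back to the rewritten Fields encoding (appendField above), a short sketch of the resulting line-protocol fragment, assuming the exported models.Fields type; the field names and values are placeholders.

fields := models.Fields{
	"idle":   91.5,
	"status": "ok",
	"up":     true,
	"user":   int64(3),
}

// Keys are sorted, integers carry an "i" suffix, and strings are quoted
// and escaped via EscapeStringField.
fmt.Println(string(fields.MarshalBinary()))
// => idle=91.5,status="ok",up=true,user=3i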
func (p Rows) Len() int { return len(p) } +// Less implements sort.Interface. func (p Rows) Less(i, j int) bool { // Sort by name first. if p[i].Name != p[j].Name { @@ -57,4 +58,5 @@ func (p Rows) Less(i, j int) bool { return p[i].tagsHash() < p[j].tagsHash() } +// Swap implements sort.Interface. func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go new file mode 100644 index 000000000..553e9d09f --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/statistic.go @@ -0,0 +1,42 @@ +package models + +// Statistic is the representation of a statistic used by the monitoring service. +type Statistic struct { + Name string `json:"name"` + Tags map[string]string `json:"tags"` + Values map[string]interface{} `json:"values"` +} + +// NewStatistic returns an initialized Statistic. +func NewStatistic(name string) Statistic { + return Statistic{ + Name: name, + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } +} + +// StatisticTags is a map that can be merged with others without causing +// mutations to either map. +type StatisticTags map[string]string + +// Merge creates a new map containing the merged contents of tags and t. +// If both tags and the receiver map contain the same key, the value in tags +// is used in the resulting map. +// +// Merge always returns a usable map. +func (t StatisticTags) Merge(tags map[string]string) map[string]string { + // Add everything in tags to the result. + out := make(map[string]string, len(tags)) + for k, v := range tags { + out[k] = v + } + + // Only add values from t that don't appear in tags. + for k, v := range t { + if _, ok := tags[k]; !ok { + out[k] = v + } + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go index 9e4157774..e98f2cb33 100644 --- a/vendor/github.com/influxdata/influxdb/models/time.go +++ b/vendor/github.com/influxdata/influxdb/models/time.go @@ -9,14 +9,36 @@ import ( "time" ) +const ( + // MinNanoTime is the minumum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. + // Because these two values need to be used for a special purpose, we do + // not allow users to write points at these two times. + MinNanoTime = int64(math.MinInt64) + 2 + + // MaxNanoTime is the maximum time that can be represented. + // + // 2262-04-11 23:47:16.854775806 +0000 UTC + // + // The highest time represented by a nanosecond needs to be used for an + // exclusive range in the shard group, so the maximum time needs to be one + // less than the possible maximum number of nanoseconds representable by an + // int64 so that we don't lose a point at that one time. + MaxNanoTime = int64(math.MaxInt64) - 1 +) + var ( - // MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch. - MaxNanoTime = time.Unix(0, math.MaxInt64).UTC() - // MinNanoTime is the minumum time that can be represented via int64 nanoseconds since the epoch. 
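Reviewer note: a quick sketch of the new integer time bounds in action; CheckTime and the exported MinNanoTime constant are from this hunk, math and time from the standard library, and the printed date is the one documented above.

earliest := time.Unix(0, models.MinNanoTime).UTC() // 1677-09-21 00:12:43.145224194 +0000 UTC
tooEarly := time.Unix(0, math.MinInt64).UTC()      // falls in the sentinel range below MinNanoTime

fmt.Println(models.CheckTime(earliest)) // <nil>
fmt.Println(models.CheckTime(tooEarly)) // "time outside range ..." error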
- MinNanoTime = time.Unix(0, math.MinInt64).UTC() + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. - ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime) + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) ) // SafeCalcTime safely calculates the time given. Will return error if the time is outside the @@ -24,7 +46,8 @@ var ( func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { mult := GetPrecisionMultiplier(precision) if t, ok := safeSignedMult(timestamp, mult); ok { - return time.Unix(0, t).UTC(), nil + tme := time.Unix(0, t).UTC() + return tme, CheckTime(tme) } return time.Time{}, ErrTimeOutOfRange @@ -32,7 +55,7 @@ func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { // CheckTime checks that a time is within the safe range. func CheckTime(t time.Time) error { - if t.Before(MinNanoTime) || t.After(MaxNanoTime) { + if t.Before(minNanoTime) || t.After(maxNanoTime) { return ErrTimeOutOfRange } return nil @@ -43,7 +66,7 @@ func safeSignedMult(a, b int64) (int64, bool) { if a == 0 || b == 0 || a == 1 || b == 1 { return a * b, true } - if a == math.MinInt64 || b == math.MaxInt64 { + if a == MinNanoTime || b == MaxNanoTime { return 0, false } c := a * b diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go index a2191ffd4..f148d66ac 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -1,7 +1,21 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. package escape -import "bytes" +import ( + "bytes" + "strings" +) +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. func Bytes(in []byte) []byte { for b, esc := range Codes { in = bytes.Replace(in, []byte{b}, esc, -1) @@ -9,6 +23,50 @@ func Bytes(in []byte) []byte { return in } +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. 
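Reviewer note: the escape package now exposes IsEscaped and AppendUnescaped alongside Bytes/Unescape. A hedged round-trip sketch, assuming the pkg/escape import path; the input bytes are placeholders.

raw := []byte(`host name,dc=us west`)
esc := escape.Bytes(raw) // `host\ name\,dc\=us\ west`

fmt.Println(escape.IsEscaped(esc))        // true
fmt.Println(string(escape.Unescape(esc))) // host name,dc=us west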
func Unescape(in []byte) []byte { if len(in) == 0 { return nil diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go index 330fbf422..db98033b0 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go @@ -3,32 +3,19 @@ package escape import "strings" var ( - Codes = map[byte][]byte{ - ',': []byte(`\,`), - '"': []byte(`\"`), - ' ': []byte(`\ `), - '=': []byte(`\=`), - } - - codesStr = map[string]string{} + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) ) -func init() { - for k, v := range Codes { - codesStr[string(k)] = string(v) - } -} - +// UnescapeString returns unescaped version of in. func UnescapeString(in string) string { - for b, esc := range codesStr { - in = strings.Replace(in, esc, b, -1) + if strings.IndexByte(in, '\\') == -1 { + return in } - return in + return unescaper.Replace(in) } +// String returns the escaped version of in. func String(in string) string { - for b, esc := range codesStr { - in = strings.Replace(in, b, esc, -1) - } - return in + return escaper.Replace(in) } diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go index ed87ca5ec..b6b8dbc1a 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/conn.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go @@ -44,9 +44,9 @@ const ( type watchType int const ( - watchTypeData = iota - watchTypeExist = iota - watchTypeChild = iota + watchTypeData = iota + watchTypeExist + watchTypeChild ) type watchPathType struct { @@ -61,6 +61,11 @@ type Logger interface { Printf(string, ...interface{}) } +type authCreds struct { + scheme string + auth []byte +} + type Conn struct { lastZxid int64 sessionID int64 @@ -75,21 +80,28 @@ type Conn struct { server string // remember the address/port of the current server conn net.Conn eventChan chan Event + eventCallback EventCallback // may be nil shouldQuit chan struct{} pingInterval time.Duration recvTimeout time.Duration connectTimeout time.Duration + creds []authCreds + credsMu sync.Mutex // protects server + sendChan chan *request requests map[int32]*request // Xid -> pending request requestsLock sync.Mutex watchers map[watchPathType][]chan Event watchersLock sync.Mutex + closeChan chan struct{} // channel to tell send loop stop // Debug (used by unit tests) reconnectDelay time.Duration logger Logger + + buf []byte } // connOption represents a connection option. @@ -185,6 +197,7 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti watchers: make(map[watchPathType][]chan Event), passwd: emptyPassword, logger: DefaultLogger, + buf: make([]byte, bufferSize), // Debug reconnectDelay: 0, @@ -224,6 +237,18 @@ func WithHostProvider(hostProvider HostProvider) connOption { } } +// EventCallback is a function that is called when an Event occurs. +type EventCallback func(Event) + +// WithEventCallback returns a connection option that specifies an event +// callback. +// The callback must not block - doing so would delay the ZK go routines. 
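To illustrate the strings.Replacer-based rewrite of the escape helpers (a sketch, not part of the diff; the import path is assumed):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/escape" // assumed import path of the vendored package
)

func main() {
	// String escapes the four line-protocol specials: comma, double quote, space, equals.
	s := escape.String(`cpu load,region=us west`)
	fmt.Println(s) // cpu\ load\,region\=us\ west

	// UnescapeString now short-circuits when no backslash is present.
	fmt.Println(escape.UnescapeString(s)) // cpu load,region=us west
}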
+func WithEventCallback(cb EventCallback) connOption { + return func(c *Conn) { + c.eventCallback = cb + } +} + func (c *Conn) Close() { close(c.shouldQuit) @@ -238,7 +263,7 @@ func (c *Conn) State() State { return State(atomic.LoadInt32((*int32)(&c.state))) } -// SessionId returns the current session id of the connection. +// SessionID returns the current session id of the connection. func (c *Conn) SessionID() int64 { return atomic.LoadInt64(&c.sessionID) } @@ -258,8 +283,16 @@ func (c *Conn) setTimeouts(sessionTimeoutMs int32) { func (c *Conn) setState(state State) { atomic.StoreInt32((*int32)(&c.state), int32(state)) + c.sendEvent(Event{Type: EventSession, State: state, Server: c.Server()}) +} + +func (c *Conn) sendEvent(evt Event) { + if c.eventCallback != nil { + c.eventCallback(evt) + } + select { - case c.eventChan <- Event{Type: EventSession, State: state, Server: c.Server()}: + case c.eventChan <- evt: default: // panic("zk: event channel full - it must be monitored and never allowed to be full") } @@ -296,6 +329,65 @@ func (c *Conn) connect() error { } } +func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) { + c.credsMu.Lock() + defer c.credsMu.Unlock() + + defer close(reauthReadyChan) + + c.logger.Printf("Re-submitting `%d` credentials after reconnect", + len(c.creds)) + + for _, cred := range c.creds { + resChan, err := c.sendRequest( + opSetAuth, + &setAuthRequest{Type: 0, + Scheme: cred.scheme, + Auth: cred.auth, + }, + &setAuthResponse{}, + nil) + + if err != nil { + c.logger.Printf("Call to sendRequest failed during credential resubmit: %s", err) + // FIXME(prozlach): lets ignore errors for now + continue + } + + res := <-resChan + if res.err != nil { + c.logger.Printf("Credential re-submit failed: %s", res.err) + // FIXME(prozlach): lets ignore errors for now + continue + } + } +} + +func (c *Conn) sendRequest( + opcode int32, + req interface{}, + res interface{}, + recvFunc func(*request, *responseHeader, error), +) ( + <-chan response, + error, +) { + rq := &request{ + xid: c.nextXid(), + opcode: opcode, + pkt: req, + recvStruct: res, + recvChan: make(chan response, 1), + recvFunc: recvFunc, + } + + if err := c.sendData(rq); err != nil { + return nil, err + } + + return rq.recvChan, nil +} + func (c *Conn) loop() { for { if err := c.connect(); err != nil { @@ -313,13 +405,15 @@ func (c *Conn) loop() { c.conn.Close() case err == nil: c.logger.Printf("Authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs) - c.hostProvider.Connected() // mark success - closeChan := make(chan struct{}) // channel to tell send loop stop - var wg sync.WaitGroup + c.hostProvider.Connected() // mark success + c.closeChan = make(chan struct{}) // channel to tell send loop stop + reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted + var wg sync.WaitGroup wg.Add(1) go func() { - err := c.sendLoop(c.conn, closeChan) + <-reauthChan + err := c.sendLoop() c.logger.Printf("Send loop terminated: err=%v", err) c.conn.Close() // causes recv loop to EOF/exit wg.Done() @@ -332,10 +426,12 @@ func (c *Conn) loop() { if err == nil { panic("zk: recvLoop should never return nil error") } - close(closeChan) // tell send loop to exit + close(c.closeChan) // tell send loop to exit wg.Done() }() + c.resendZkAuth(reauthChan) + c.sendSetWatches() wg.Wait() } @@ -507,66 +603,73 @@ func (c *Conn) authenticate() error { return nil } -func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan struct{}) error { - pingTicker := time.NewTicker(c.pingInterval) - 
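A hedged usage sketch of the new event-callback option (not part of the diff; the server address is a placeholder):

package main

import (
	"log"
	"time"

	"github.com/samuel/go-zookeeper/zk" // assumed import path of the vendored package
)

func main() {
	// The callback is invoked from the connection's own goroutines,
	// so it must return quickly and never block.
	cb := func(ev zk.Event) {
		log.Printf("zk event: type=%v state=%v server=%s", ev.Type, ev.State, ev.Server)
	}

	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second, zk.WithEventCallback(cb))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}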
defer pingTicker.Stop() +func (c *Conn) sendData(req *request) error { + header := &requestHeader{req.xid, req.opcode} + n, err := encodePacket(c.buf[4:], header) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } - buf := make([]byte, bufferSize) - for { - select { - case req := <-c.sendChan: - header := &requestHeader{req.xid, req.opcode} - n, err := encodePacket(buf[4:], header) - if err != nil { - req.recvChan <- response{-1, err} - continue - } + n2, err := encodePacket(c.buf[4+n:], req.pkt) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } - n2, err := encodePacket(buf[4+n:], req.pkt) - if err != nil { - req.recvChan <- response{-1, err} - continue - } + n += n2 - n += n2 + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) - binary.BigEndian.PutUint32(buf[:4], uint32(n)) + c.requestsLock.Lock() + select { + case <-c.closeChan: + req.recvChan <- response{-1, ErrConnectionClosed} + c.requestsLock.Unlock() + return ErrConnectionClosed + default: + } + c.requests[req.xid] = req + c.requestsLock.Unlock() - c.requestsLock.Lock() - select { - case <-closeChan: - req.recvChan <- response{-1, ErrConnectionClosed} - c.requestsLock.Unlock() - return ErrConnectionClosed - default: - } - c.requests[req.xid] = req - c.requestsLock.Unlock() + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + req.recvChan <- response{-1, err} + c.conn.Close() + return err + } - conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) - _, err = conn.Write(buf[:n+4]) - conn.SetWriteDeadline(time.Time{}) - if err != nil { - req.recvChan <- response{-1, err} - conn.Close() + return nil +} + +func (c *Conn) sendLoop() error { + pingTicker := time.NewTicker(c.pingInterval) + defer pingTicker.Stop() + + for { + select { + case req := <-c.sendChan: + if err := c.sendData(req); err != nil { return err } case <-pingTicker.C: - n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) + n, err := encodePacket(c.buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) if err != nil { panic("zk: opPing should never fail to serialize") } - binary.BigEndian.PutUint32(buf[:4], uint32(n)) + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) - conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) - _, err = conn.Write(buf[:n+4]) - conn.SetWriteDeadline(time.Time{}) + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) if err != nil { - conn.Close() + c.conn.Close() return err } - case <-closeChan: + case <-c.closeChan: return nil } } @@ -611,10 +714,7 @@ func (c *Conn) recvLoop(conn net.Conn) error { Path: res.Path, Err: nil, } - select { - case c.eventChan <- ev: - default: - } + c.sendEvent(ev) wTypes := make([]watchType, 0, 2) switch res.Type { case EventNodeCreated: @@ -706,7 +806,28 @@ func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func (c *Conn) AddAuth(scheme string, auth []byte) error { _, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil) - return err + + if err != nil { + return err + } + + // Remember authdata so that it can be re-submitted on reconnect + // + // FIXME(prozlach): For now we treat "userfoo:passbar" and "userfoo:passbar2" + // as two different entries, which will be re-submitted on reconnet. 
Some + // research is needed on how ZK treats these cases and + // then maybe switch to something like "map[username] = password" to allow + // only single password for given user with users being unique. + obj := authCreds{ + scheme: scheme, + auth: auth, + } + + c.credsMu.Lock() + c.creds = append(c.creds, obj) + c.credsMu.Unlock() + + return nil } func (c *Conn) Children(path string) ([]string, *Stat, error) { @@ -867,6 +988,7 @@ func (c *Conn) Sync(path string) (string, error) { type MultiResponse struct { Stat *Stat String string + Error error } // Multi executes multiple ZooKeeper operations or none of them. The provided @@ -897,7 +1019,7 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) { _, err := c.request(opMulti, req, res, nil) mr := make([]MultiResponse, len(res.Ops)) for i, op := range res.Ops { - mr[i] = MultiResponse{Stat: op.Stat, String: op.String} + mr[i] = MultiResponse{Stat: op.Stat, String: op.String, Error: op.Err.toError()} } return mr, err } diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/samuel/go-zookeeper/zk/constants.go index f9b39b904..33b5563b9 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/constants.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/constants.go @@ -28,18 +28,19 @@ const ( opClose = -11 opSetAuth = 100 opSetWatches = 101 + opError = -1 // Not in protocol, used internally opWatcherEvent = -2 ) const ( - EventNodeCreated = EventType(1) - EventNodeDeleted = EventType(2) - EventNodeDataChanged = EventType(3) - EventNodeChildrenChanged = EventType(4) + EventNodeCreated EventType = 1 + EventNodeDeleted EventType = 2 + EventNodeDataChanged EventType = 3 + EventNodeChildrenChanged EventType = 4 - EventSession = EventType(-1) - EventNotWatching = EventType(-2) + EventSession EventType = -1 + EventNotWatching EventType = -2 ) var ( @@ -54,14 +55,13 @@ var ( ) const ( - StateUnknown = State(-1) - StateDisconnected = State(0) - StateConnecting = State(1) - StateAuthFailed = State(4) - StateConnectedReadOnly = State(5) - StateSaslAuthenticated = State(6) - StateExpired = State(-112) - // StateAuthFailed = State(-113) + StateUnknown State = -1 + StateDisconnected State = 0 + StateConnecting State = 1 + StateAuthFailed State = 4 + StateConnectedReadOnly State = 5 + StateSaslAuthenticated State = 6 + StateExpired State = -112 StateConnected = State(100) StateHasSession = State(101) @@ -154,20 +154,20 @@ const ( errBadArguments = -8 errInvalidState = -9 // API errors - errAPIError = ErrCode(-100) - errNoNode = ErrCode(-101) // * - errNoAuth = ErrCode(-102) - errBadVersion = ErrCode(-103) // * - errNoChildrenForEphemerals = ErrCode(-108) - errNodeExists = ErrCode(-110) // * - errNotEmpty = ErrCode(-111) - errSessionExpired = ErrCode(-112) - errInvalidCallback = ErrCode(-113) - errInvalidAcl = ErrCode(-114) - errAuthFailed = ErrCode(-115) - errClosing = ErrCode(-116) - errNothing = ErrCode(-117) - errSessionMoved = ErrCode(-118) + errAPIError ErrCode = -100 + errNoNode ErrCode = -101 // * + errNoAuth ErrCode = -102 + errBadVersion ErrCode = -103 // * + errNoChildrenForEphemerals ErrCode = -108 + errNodeExists ErrCode = -110 // * + errNotEmpty ErrCode = -111 + errSessionExpired ErrCode = -112 + errInvalidCallback ErrCode = -113 + errInvalidAcl ErrCode = -114 + errAuthFailed ErrCode = -115 + errClosing ErrCode = -116 + errNothing ErrCode = -117 + errSessionMoved ErrCode = -118 ) // Constants for ACL permissions diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go 
b/vendor/github.com/samuel/go-zookeeper/zk/lock.go index f13a8b0ba..3c35a427c 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/lock.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/lock.go @@ -58,8 +58,16 @@ func (l *Lock) Lock() error { parts := strings.Split(l.path, "/") pth := "" for _, p := range parts[1:] { + var exists bool pth += "/" + p - _, err := l.c.Create(pth, []byte{}, 0, l.acl) + exists, _, err = l.c.Exists(pth) + if err != nil { + return err + } + if exists == true { + continue + } + _, err = l.c.Create(pth, []byte{}, 0, l.acl) if err != nil && err != ErrNodeExists { return err } @@ -86,7 +94,7 @@ func (l *Lock) Lock() error { } lowestSeq := seq - prevSeq := 0 + prevSeq := -1 prevSeqPath := "" for _, p := range children { s, err := parseSeq(p) diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go index 618185a23..3663064ca 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go @@ -99,37 +99,41 @@ func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) return cluster, nil } -func (ts *TestCluster) Connect(idx int) (*Conn, error) { - zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15) +func (tc *TestCluster) Connect(idx int) (*Conn, error) { + zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", tc.Servers[idx].Port)}, time.Second*15) return zk, err } -func (ts *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { - return ts.ConnectAllTimeout(time.Second * 15) +func (tc *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { + return tc.ConnectAllTimeout(time.Second * 15) } -func (ts *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { - hosts := make([]string, len(ts.Servers)) - for i, srv := range ts.Servers { +func (tc *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { + return tc.ConnectWithOptions(sessionTimeout) +} + +func (tc *TestCluster) ConnectWithOptions(sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) { + hosts := make([]string, len(tc.Servers)) + for i, srv := range tc.Servers { hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) } - zk, ch, err := Connect(hosts, sessionTimeout) + zk, ch, err := Connect(hosts, sessionTimeout, options...) 
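A brief sketch of the lock path against the changed Lock() (NewLock/Unlock are defined elsewhere in the package and are assumptions here):

package main

import (
	"log"
	"time"

	"github.com/samuel/go-zookeeper/zk" // assumed import path of the vendored package
)

func main() {
	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 15*time.Second) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Lock() now probes each parent node with Exists before issuing Create,
	// so repeated acquisitions stop paying a failing Create per path segment.
	l := zk.NewLock(conn, "/locks/job-42", zk.WorldACL(zk.PermAll))
	if err := l.Lock(); err != nil {
		log.Fatal(err)
	}
	defer l.Unlock()
}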
return zk, ch, err } -func (ts *TestCluster) Stop() error { - for _, srv := range ts.Servers { +func (tc *TestCluster) Stop() error { + for _, srv := range tc.Servers { srv.Srv.Stop() } - defer os.RemoveAll(ts.Path) - return ts.waitForStop(5, time.Second) + defer os.RemoveAll(tc.Path) + return tc.waitForStop(5, time.Second) } // waitForStart blocks until the cluster is up -func (ts *TestCluster) waitForStart(maxRetry int, interval time.Duration) error { +func (tc *TestCluster) waitForStart(maxRetry int, interval time.Duration) error { // verify that the servers are up with SRVR - serverAddrs := make([]string, len(ts.Servers)) - for i, s := range ts.Servers { + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) } @@ -144,10 +148,10 @@ func (ts *TestCluster) waitForStart(maxRetry int, interval time.Duration) error } // waitForStop blocks until the cluster is down -func (ts *TestCluster) waitForStop(maxRetry int, interval time.Duration) error { +func (tc *TestCluster) waitForStop(maxRetry int, interval time.Duration) error { // verify that the servers are up with RUOK - serverAddrs := make([]string, len(ts.Servers)) - for i, s := range ts.Servers { + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) } @@ -188,3 +192,25 @@ func (tc *TestCluster) StopServer(server string) { } panic(fmt.Sprintf("Unknown server: %s", server)) } + +func (tc *TestCluster) StartAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Start(); err != nil { + return fmt.Errorf( + "Failed to start server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} + +func (tc *TestCluster) StopAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Stop(); err != nil { + return fmt.Errorf( + "Failed to stop server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/samuel/go-zookeeper/zk/structs.go index 02cd3f353..d4af27dea 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/structs.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/structs.go @@ -270,6 +270,7 @@ type multiResponseOp struct { Header multiHeader String string Stat *Stat + Err ErrCode } type multiResponse struct { Ops []multiResponseOp @@ -327,6 +328,8 @@ func (r *multiRequest) Decode(buf []byte) (int, error) { } func (r *multiResponse) Decode(buf []byte) (int, error) { + var multiErr error + r.Ops = make([]multiResponseOp, 0) r.DoneHeader = multiHeader{-1, true, -1} total := 0 @@ -347,6 +350,8 @@ func (r *multiResponse) Decode(buf []byte) (int, error) { switch header.Type { default: return total, ErrAPIError + case opError: + w = reflect.ValueOf(&res.Err) case opCreate: w = reflect.ValueOf(&res.String) case opSetData: @@ -362,8 +367,12 @@ func (r *multiResponse) Decode(buf []byte) (int, error) { total += n } r.Ops = append(r.Ops, res) + if multiErr == nil && res.Err != errOk { + // Use the first error as the error returned from Multi(). + multiErr = res.Err.toError() + } } - return total, nil + return total, multiErr } type watcherEvent struct { diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 000000000..95a0f0541 --- /dev/null +++ b/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. 
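A hedged sketch of how the new Start/StopAllServers helpers might be used in a reconnection test (running it requires a local ZooKeeper install, which StartTestCluster shells out to; treat the details as assumptions):

package zk_test

import (
	"os"
	"testing"
	"time"

	"github.com/samuel/go-zookeeper/zk" // assumed import path of the vendored package
)

func TestReconnect(t *testing.T) {
	tc, err := zk.StartTestCluster(1, os.Stdout, os.Stderr)
	if err != nil {
		t.Fatal(err)
	}
	defer tc.Stop()

	conn, _, err := tc.ConnectWithOptions(15 * time.Second)
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// Bounce every server; the connection should recover and, with the
	// changes above, re-submit any previously added auth credentials.
	if err := tc.StopAllServers(); err != nil {
		t.Fatal(err)
	}
	if err := tc.StartAllServers(); err != nil {
		t.Fatal(err)
	}
}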
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go similarity index 91% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go rename to vendor/github.com/ugorji/go/codec/0doc.go index caa7e0a3b..209f9ebad 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/0doc.go +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -64,10 +64,11 @@ Rich Feature Set includes: - Never silently skip data when decoding. User decides whether to return an error or silently skip data when keys or indexes in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. + - Handle unique idiosyncrasies of codecs e.g. - For messagepack, configure how ambiguities in handling raw bytes are resolved - For messagepack, provide rpc server/client codec to support msgpack-rpc protocol defined at: @@ -171,6 +172,8 @@ package codec // TODO: // +// - optimization for codecgen: +// if len of entity is <= 3 words, then support a value receiver for encode. // - (En|De)coder should store an error when it occurs. // Until reset, subsequent calls return that error that was stored. // This means that free panics must go away. @@ -178,16 +181,19 @@ package codec // - Decoding using a chan is good, but incurs concurrency costs. // This is because there's no fast way to use a channel without it // having to switch goroutines constantly. -// Callback pattern is still the best. Maybe cnsider supporting something like: +// Callback pattern is still the best. Maybe consider supporting something like: // type X struct { // Name string // Ys []Y // Ys chan <- Y -// Ys func(interface{}) -> call this interface for each entry in there. +// Ys func(Y) -> call this function for each entry // } // - Consider adding a isZeroer interface { isZero() bool } // It is used within isEmpty, for omitEmpty support. // - Consider making Handle used AS-IS within the encoding/decoding session. // This means that we don't cache Handle information within the (En|De)coder, // except we really need it at Reset(...) 
-// - Handle recursive types during encoding/decoding? +// - Consider adding math/big support +// - Consider reducing the size of the generated functions: +// Maybe use one loop, and put the conditionals in the loop. +// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } } diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md similarity index 99% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md rename to vendor/github.com/ugorji/go/codec/README.md index a790a52bb..91cb3a27b 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/README.md +++ b/vendor/github.com/ugorji/go/codec/README.md @@ -68,7 +68,7 @@ Rich Feature Set includes: - Encode/Decode from/to chan types (for iterative streaming support) - Drop-in replacement for encoding/json. `json:` key in struct tag supported. - Provides a RPC Server and Client Codec for net/rpc communication protocol. - - Handle unique idiosynchracies of codecs e.g. + - Handle unique idiosyncrasies of codecs e.g. - For messagepack, configure how ambiguities in handling raw bytes are resolved - For messagepack, provide rpc server/client codec to support msgpack-rpc protocol defined at: diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go rename to vendor/github.com/ugorji/go/codec/binc.go index c884d14dc..33120dcb6 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/binc.go +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -348,6 +348,13 @@ func (d *bincDecDriver) readNextBd() { d.bdRead = true } +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *bincDecDriver) ContainerType() (vt valueType) { if d.vd == bincVdSpecial && d.vs == bincSpNil { return valueTypeNil @@ -705,7 +712,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) } func (d *bincDecDriver) DecodeString() (s string) { - // DecodeBytes does not accomodate symbols, whose impl stores string version in map. + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. // Use decStringAndBytes directly. 
// return string(d.DecodeBytes(d.b[:], true, true)) _, s = d.decStringAndBytes(d.b[:], true, true) @@ -908,10 +915,14 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver { func (e *bincEncDriver) reset() { e.w = e.e.w + e.s = 0 + e.m = nil } func (d *bincDecDriver) reset() { d.r = d.d.r + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 } var _ decDriver = (*bincDecDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go rename to vendor/github.com/ugorji/go/codec/cbor.go index 0e5d32b2e..4fa349ac8 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/cbor.go +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -188,6 +188,13 @@ func (d *cborDecDriver) readNextBd() { d.bdRead = true } +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *cborDecDriver) ContainerType() (vt valueType) { if d.bd == cborBdNil { return valueTypeNil @@ -508,7 +515,7 @@ func (d *cborDecDriver) DecodeNaked() { n.v = valueTypeExt n.u = d.decUint() n.l = nil - d.bdRead = false + // d.bdRead = false // d.d.decode(&re.Value) // handled by decode itself. // decodeFurther = true default: @@ -578,6 +585,7 @@ func (e *cborEncDriver) reset() { func (d *cborDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } var _ decDriver = (*cborDecDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go rename to vendor/github.com/ugorji/go/codec/decode.go index b3b99f036..52c1dfe83 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/decode.go +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -91,10 +91,12 @@ type decDriver interface { uncacheRead() } -type decNoSeparator struct{} +type decNoSeparator struct { +} + +func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) ReadEnd() {} -func (_ decNoSeparator) uncacheRead() {} +// func (_ decNoSeparator) uncacheRead() {} type DecodeOptions struct { // MapType specifies type to use during schema-less decoding of a map in the stream. @@ -161,6 +163,15 @@ type DecodeOptions struct { // Note: Handles will be smart when using the intern functionality. // So everything will not be interned. InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. 
+ PreferArrayOverSlice bool } // ------------------------------------ @@ -433,6 +444,10 @@ func (f *decFnInfo) rawExt(rv reflect.Value) { f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil) } +func (f *decFnInfo) raw(rv reflect.Value) { + rv.SetBytes(f.d.raw()) +} + func (f *decFnInfo) ext(rv reflect.Value) { f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn) } @@ -583,14 +598,16 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { if d.mtid == 0 || d.mtid == mapIntfIntfTypId { l := len(n.ms) n.ms = append(n.ms, nil) - d.decode(&n.ms[l]) - rvn = reflect.ValueOf(&n.ms[l]).Elem() + var v2 interface{} = &n.ms[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() n.ms = n.ms[:l] } else if d.mtid == mapStrIntfTypId { // for json performance l := len(n.ns) n.ns = append(n.ns, nil) - d.decode(&n.ns[l]) - rvn = reflect.ValueOf(&n.ns[l]).Elem() + var v2 interface{} = &n.ns[l] + d.decode(v2) + rvn = reflect.ValueOf(v2).Elem() n.ns = n.ns[:l] } else { rvn = reflect.New(d.h.MapType).Elem() @@ -601,9 +618,13 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { if d.stid == 0 || d.stid == intfSliceTypId { l := len(n.ss) n.ss = append(n.ss, nil) - d.decode(&n.ss[l]) - rvn = reflect.ValueOf(&n.ss[l]).Elem() + var v2 interface{} = &n.ss[l] + d.decode(v2) n.ss = n.ss[:l] + rvn = reflect.ValueOf(v2).Elem() + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn = reflectArrayOf(rvn) + } } else { rvn = reflect.New(d.h.SliceType).Elem() d.decodeValue(rvn, nil) @@ -615,9 +636,9 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) { l := len(n.is) n.is = append(n.is, nil) v2 := &n.is[l] - n.is = n.is[:l] d.decode(v2) v = *v2 + n.is = n.is[:l] } bfn := d.h.getExtForTag(tag) if bfn == nil { @@ -1166,7 +1187,7 @@ type decRtidFn struct { // primitives are being decoded. // // maps and arrays are not handled by this mechanism. -// However, RawExt is, and we accomodate for extensions that decode +// However, RawExt is, and we accommodate for extensions that decode // RawExt from DecodeNaked, but need to decode the value subsequently. // kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. 
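A sketch of the new PreferArrayOverSlice decode option (not part of the diff; JsonHandle and NewDecoderBytes come from elsewhere in the codec package):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path of the vendored package
)

func main() {
	var h codec.JsonHandle
	// When decoding into a nil interface{}, produce a fixed-size array
	// instead of a slice; only effective on go1.5+ (reflect.ArrayOf).
	h.PreferArrayOverSlice = true

	var v interface{}
	if err := codec.NewDecoderBytes([]byte(`[1, 2, 3]`), &h).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v) // typically [3]interface {} rather than []interface {}
}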
// @@ -1453,8 +1474,8 @@ func (d *Decoder) swallow() { l := len(n.is) n.is = append(n.is, nil) v2 := &n.is[l] - n.is = n.is[:l] d.decode(v2) + n.is = n.is[:l] } } } @@ -1504,6 +1525,8 @@ func (d *Decoder) decode(iv interface{}) { *v = 0 case *[]uint8: *v = nil + case *Raw: + *v = nil case reflect.Value: if v.Kind() != reflect.Ptr || v.IsNil() { d.errNotValidPtrValue(v) @@ -1543,7 +1566,6 @@ func (d *Decoder) decode(iv interface{}) { d.decodeValueNotNil(v.Elem(), nil) case *string: - *v = d.d.DecodeString() case *bool: *v = d.d.DecodeBool() @@ -1574,6 +1596,9 @@ func (d *Decoder) decode(iv interface{}) { case *[]uint8: *v = d.d.DecodeBytes(*v, false, false) + case *Raw: + *v = d.raw() + case *interface{}: d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil) @@ -1695,6 +1720,8 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool fn.f = (*decFnInfo).selferUnmarshal } else if rtid == rawExtTypId { fn.f = (*decFnInfo).rawExt + } else if rtid == rawTypId { + fn.f = (*decFnInfo).raw } else if d.d.IsBuiltinType(rtid) { fn.f = (*decFnInfo).builtin } else if xfFn := d.h.getExt(rtid); xfFn != nil { @@ -1793,12 +1820,13 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool } func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. if d.h.ErrorIfNoField { if index >= 0 { d.errorf("no matching struct field found when decoding stream array at index %v", index) return } else if rvkencname != "" { - d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname) + d.errorf("no matching struct field found when decoding stream map with key " + rvkencname) return } } @@ -1862,6 +1890,7 @@ func (d *Decoder) intern(s string) { } } +// nextValueBytes returns the next value in the stream as a set of bytes. func (d *Decoder) nextValueBytes() []byte { d.d.uncacheRead() d.r.track() @@ -1869,6 +1898,15 @@ func (d *Decoder) nextValueBytes() []byte { return d.r.stopTrack() } +func (d *Decoder) raw() []byte { + // ensure that this is not a view into the bytes + // i.e. make new copy always. + bs := d.nextValueBytes() + bs2 := make([]byte, len(bs)) + copy(bs2, bs) + return bs2 +} + // -------------------------------------------------- // decSliceHelper assists when decoding into a slice, from a map or an array in the stream. diff --git a/vendor/github.com/ugorji/go/codec/decode_go.go b/vendor/github.com/ugorji/go/codec/decode_go.go new file mode 100644 index 000000000..ba289cef6 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + return +} diff --git a/vendor/github.com/ugorji/go/codec/decode_go14.go b/vendor/github.com/ugorji/go/codec/decode_go14.go new file mode 100644 index 000000000..50063bc8f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode_go14.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) { + panic("reflect.ArrayOf unsupported") +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go similarity index 89% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go rename to vendor/github.com/ugorji/go/codec/encode.go index 99af6fa55..c2cef812e 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/encode.go +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -110,6 +110,28 @@ type EncodeOptions struct { // Canonical bool + // CheckCircularRef controls whether we check for circular references + // and error fast during an encode. + // + // If enabled, an error is received if a pointer to a struct + // references itself either directly or through one of its fields (iteratively). + // + // This is opt-in, as there may be a performance hit to checking circular references. + CheckCircularRef bool + + // RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers + // when checking if a value is empty. + // + // Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls. + RecursiveEmptyCheck bool + + // Raw controls whether we encode Raw values. + // This is a "dangerous" option and must be explicitly set. + // If set, we blindly encode Raw values as-is, without checking + // if they are a correct representation of a value in that format. + // If unset, we error out. + Raw bool + // AsSymbols defines what should be encoded as symbols. // // Encoding as symbols can reduce the encoded size significantly. @@ -132,13 +154,16 @@ type simpleIoEncWriterWriter struct { w io.Writer bw io.ByteWriter sw ioEncStringWriter + bs [1]byte } func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) { if o.bw != nil { return o.bw.WriteByte(c) } - _, err = o.w.Write([]byte{c}) + // _, err = o.w.Write([]byte{c}) + o.bs[0] = c + _, err = o.w.Write(o.bs[:]) return } @@ -210,45 +235,57 @@ type bytesEncWriter struct { } func (z *bytesEncWriter) writeb(s []byte) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) + if len(s) == 0 { + return } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) } func (z *bytesEncWriter) writestr(s string) { - if len(s) > 0 { - c := z.grow(len(s)) - copy(z.b[c:], s) + if len(s) == 0 { + return } + oc, a := z.growNoAlloc(len(s)) + if a { + z.growAlloc(len(s), oc) + } + copy(z.b[oc:], s) } func (z *bytesEncWriter) writen1(b1 byte) { - c := z.grow(1) - z.b[c] = b1 + oc, a := z.growNoAlloc(1) + if a { + z.growAlloc(1, oc) + } + z.b[oc] = b1 } func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { - c := z.grow(2) - z.b[c] = b1 - z.b[c+1] = b2 + oc, a := z.growNoAlloc(2) + if a { + z.growAlloc(2, oc) + } + z.b[oc+1] = b2 + z.b[oc] = b1 } func (z *bytesEncWriter) atEndOfEncode() { *(z.out) = z.b[:z.c] } -func (z *bytesEncWriter) grow(n int) (oldcursor int) { +// have a growNoalloc(n int), which can be inlined. +// if allocation is needed, then call growAlloc(n int) + +func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) { oldcursor = z.c - z.c = oldcursor + n + z.c = z.c + n if z.c > len(z.b) { if z.c > cap(z.b) { - // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. 
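A sketch of the opt-in Raw support described above (codec.Raw is defined elsewhere in the package; the JSON handle and field layout here are illustrative assumptions):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path of the vendored package
)

// Envelope defers decoding of Payload: it is captured verbatim as raw bytes
// and can be inspected or forwarded later.
type Envelope struct {
	Kind    string
	Payload codec.Raw // codec.Raw ([]byte) is defined elsewhere in the package
}

func main() {
	var h codec.JsonHandle
	h.Raw = true // encoding Raw values is deliberately opt-in; they are written as-is

	in := []byte(`{"Kind":"metric","Payload":{"value":42}}`)
	var env Envelope
	if err := codec.NewDecoderBytes(in, &h).Decode(&env); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", env.Payload) // {"value":42}

	var out []byte
	if err := codec.NewEncoderBytes(&out, &h).Encode(env); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}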
- // bytes.Buffer model (2*cap + n): much better - // bs := make([]byte, 2*cap(z.b)+n) - bs := make([]byte, growCap(cap(z.b), 1, n)) - copy(bs, z.b[:oldcursor]) - z.b = bs + allocNeeded = true } else { z.b = z.b[:cap(z.b)] } @@ -256,6 +293,15 @@ func (z *bytesEncWriter) grow(n int) (oldcursor int) { return } +func (z *bytesEncWriter) growAlloc(n int, oldcursor int) { + // appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls. + // bytes.Buffer model (2*cap + n): much better + // bs := make([]byte, 2*cap(z.b)+n) + bs := make([]byte, growCap(cap(z.b), 1, n)) + copy(bs, z.b[:oldcursor]) + z.b = bs +} + // --------------------------------------------- type encFnInfo struct { @@ -270,6 +316,10 @@ func (f *encFnInfo) builtin(rv reflect.Value) { f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface()) } +func (f *encFnInfo) raw(rv reflect.Value) { + f.e.raw(rv.Interface().(Raw)) +} + func (f *encFnInfo) rawExt(rv reflect.Value) { // rev := rv.Interface().(RawExt) // f.e.e.EncodeRawExt(&rev, f.e) @@ -296,7 +346,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v v = rv.Interface() } else if indir == -1 { // If a non-pointer was passed to Encode(), then that value is not addressable. - // Take addr if addresable, else copy value to an addressable value. + // Take addr if addressable, else copy value to an addressable value. if rv.CanAddr() { v = rv.Addr().Interface() } else { @@ -464,7 +514,7 @@ func (f *encFnInfo) kSlice(rv reflect.Value) { for j := 0; j < l; j++ { if cr != nil { if ti.mbs { - if l%2 == 0 { + if j%2 == 0 { cr.sendContainerState(containerMapKey) } else { cr.sendContainerState(containerMapValue) @@ -503,7 +553,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { newlen := len(fti.sfi) // Use sync.Pool to reduce allocating slices unnecessarily. - // The cost of the occasional locking is less than the cost of new allocation. + // The cost of sync.Pool is less than the cost of new allocation. pool, poolv, fkvs := encStructPoolGet(newlen) // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) @@ -512,25 +562,20 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { } newlen = 0 var kv stringRv + recur := e.h.RecursiveEmptyCheck for _, si := range tisfi { kv.r = si.field(rv, false) - // if si.i != -1 { - // rvals[newlen] = rv.Field(int(si.i)) - // } else { - // rvals[newlen] = rv.FieldByIndex(si.is) - // } if toMap { - if si.omitEmpty && isEmptyValue(kv.r) { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { continue } kv.v = si.encName } else { // use the zero value. 
// if a reference or struct, set to nil (so you do not output too much) - if si.omitEmpty && isEmptyValue(kv.r) { + if si.omitEmpty && isEmptyValue(kv.r, recur, recur) { switch kv.r.Kind() { - case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, - reflect.Map, reflect.Slice: + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: kv.r = reflect.Value{} //encode as nil } } @@ -541,7 +586,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { // debugf(">>>> kStruct: newlen: %v", newlen) // sep := !e.be - ee := e.e //don't dereference everytime + ee := e.e //don't dereference every time if toMap { ee.EncodeMapStart(newlen) @@ -596,13 +641,15 @@ func (f *encFnInfo) kStruct(rv reflect.Value) { // f.e.encodeValue(rv.Elem()) // } -func (f *encFnInfo) kInterface(rv reflect.Value) { - if rv.IsNil() { - f.e.e.EncodeNil() - return - } - f.e.encodeValue(rv.Elem(), nil) -} +// func (f *encFnInfo) kInterface(rv reflect.Value) { +// println("kInterface called") +// debug.PrintStack() +// if rv.IsNil() { +// f.e.e.EncodeNil() +// return +// } +// f.e.encodeValue(rv.Elem(), nil) +// } func (f *encFnInfo) kMap(rv reflect.Value) { ee := f.e.e @@ -877,6 +924,7 @@ type Encoder struct { // as the handler MAY need to do some coordination. w encWriter s []encRtidFn + ci set be bool // is binary encoding js bool // is json handle @@ -925,7 +973,7 @@ func newEncoder(h Handle) *Encoder { // Reset the Encoder with a new output stream. // -// This accomodates using the state of the Encoder, +// This accommodates using the state of the Encoder, // where it has "cached" information about sub-engines. func (e *Encoder) Reset(w io.Writer) { ww, ok := w.(ioEncWriterWriter) @@ -1032,20 +1080,6 @@ func (e *Encoder) MustEncode(v interface{}) { e.w.atEndOfEncode() } -// comment out these (Must)Write methods. They were only put there to support cbor. -// However, users already have access to the streams, and can write directly. -// -// // Write allows users write to the Encoder stream directly. -// func (e *Encoder) Write(bs []byte) (err error) { -// defer panicToErr(&err) -// e.w.writeb(bs) -// return -// } -// // MustWrite is like write, but panics if unable to Write. -// func (e *Encoder) MustWrite(bs []byte) { -// e.w.writeb(bs) -// } - func (e *Encoder) encode(iv interface{}) { // if ics, ok := iv.(Selfer); ok { // ics.CodecEncodeSelf(e) @@ -1057,7 +1091,8 @@ func (e *Encoder) encode(iv interface{}) { e.e.EncodeNil() case Selfer: v.CodecEncodeSelf(e) - + case Raw: + e.raw(v) case reflect.Value: e.encodeValue(v, nil) @@ -1133,20 +1168,23 @@ func (e *Encoder) encode(iv interface{}) { } } -func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { - if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) - fn.f(&fn.i, rv) - } -} - -func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) { +func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) { // use a goto statement instead of a recursive function for ptr/interface. TOP: switch rv.Kind() { - case reflect.Ptr, reflect.Interface: + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
+ sptr = rv.UnsafeAddr() + break TOP + } + goto TOP + case reflect.Interface: if rv.IsNil() { e.e.EncodeNil() return @@ -1163,18 +1201,40 @@ TOP: return } - return rv, true + proceed = true + rv2 = rv + return +} + +func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr, + checkFastpath, checkCodecSelfer bool) { + if sptr != 0 { + if (&e.ci).add(sptr) { + e.errorf("circular reference found: # %d", sptr) + } + } + if fn == nil { + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + // fn = e.getEncFn(rtid, rt, true, true) + fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer) + } + fn.f(&fn.i, rv) + if sptr != 0 { + (&e.ci).remove(sptr) + } +} + +func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) { + if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed { + e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer) + } } func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) { // if a valid fn is passed, it MUST BE for the dereferenced type of rv - if rv, proceed := e.preEncodeValue(rv); proceed { - if fn == nil { - rt := rv.Type() - rtid := reflect.ValueOf(rt).Pointer() - fn = e.getEncFn(rtid, rt, true, true) - } - fn.f(&fn.i, rv) + if rv, sptr, proceed := e.preEncodeValue(rv); proceed { + e.doEncodeValue(rv, fn, sptr, true, true) } } @@ -1217,6 +1277,8 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo if checkCodecSelfer && ti.cs { fn.f = (*encFnInfo).selferMarshal + } else if rtid == rawTypId { + fn.f = (*encFnInfo).raw } else if rtid == rawExtTypId { fn.f = (*encFnInfo).rawExt } else if e.e.IsBuiltinType(rtid) { @@ -1234,7 +1296,7 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo } else { rk := rt.Kind() if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) { - if rt.PkgPath() == "" { + if rt.PkgPath() == "" { // un-named slice or map if idx := fastpathAV.index(rtid); idx != -1 { fn.f = fastpathAV[idx].encfn } @@ -1284,10 +1346,11 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo fn.f = (*encFnInfo).kSlice case reflect.Struct: fn.f = (*encFnInfo).kStruct + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue // case reflect.Ptr: // fn.f = (*encFnInfo).kPtr - case reflect.Interface: - fn.f = (*encFnInfo).kInterface + // case reflect.Interface: + // fn.f = (*encFnInfo).kInterface case reflect.Map: fn.f = (*encFnInfo).kMap default: @@ -1320,6 +1383,18 @@ func (e *Encoder) asis(v []byte) { } } +func (e *Encoder) raw(vv Raw) { + v := []byte(vv) + if !e.h.Raw { + e.errorf("Raw values cannot be encoded: %v", v) + } + if e.as == nil { + e.w.writeb(v) + } else { + e.as.EncodeAsis(v) + } +} + func (e *Encoder) errorf(format string, params ...interface{}) { err := fmt.Errorf(format, params...) 
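A sketch of the circular-reference guard in action (an illustration, not part of the diff; the handle choice is arbitrary):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path of the vendored package
)

type Node struct {
	Name string
	Next *Node
}

func main() {
	var h codec.JsonHandle
	// Opt-in: without it a self-referential value recurses until the stack
	// overflows; with it the encoder reports the cycle as an error.
	h.CheckCircularRef = true

	n := &Node{Name: "a"}
	n.Next = n // cycle

	var out []byte
	err := codec.NewEncoderBytes(&out, &h).Encode(n)
	fmt.Println(err) // circular reference found: # <address>
}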
panic(err) @@ -1353,25 +1428,6 @@ func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) { // panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed // } // idxpool := newlen / 8 - - // if pool == nil { - // fkvs = make([]stringRv, newlen) - // } else { - // poolv = pool.Get() - // switch vv := poolv.(type) { - // case *[8]stringRv: - // fkvs = vv[:newlen] - // case *[16]stringRv: - // fkvs = vv[:newlen] - // case *[32]stringRv: - // fkvs = vv[:newlen] - // case *[64]stringRv: - // fkvs = vv[:newlen] - // case *[128]stringRv: - // fkvs = vv[:newlen] - // } - // } - if newlen <= 8 { p = &encStructPool[0] v = p.Get() diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go rename to vendor/github.com/ugorji/go/codec/fast-path.generated.go index d968a500f..f2e5d2dcf 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.generated.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go @@ -23,7 +23,7 @@ package codec // Currently support // - slice of all builtin types, // - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) // This should provide adequate "typical" implementations. // // Note that fast track decode functions must handle values for which an address cannot be obtained. @@ -38,6 +38,8 @@ import ( "sort" ) +const fastpathEnabled = true + const fastpathCheckNilFalse = false // for reflect const fastpathCheckNilTrue = true // for type switch @@ -81,9 +83,6 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { - if !fastpathEnabled { - return - } i := 0 fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) @@ -373,9 +372,6 @@ func init() { // -- -- fast path type switch func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -1741,9 +1737,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -1829,9 +1822,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case map[interface{}]interface{}: @@ -3124,7 +3114,11 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { // -- -- fast path functions func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) { - fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { ee := e.e @@ -3145,8 +3139,39 @@ func (_ fastpathT) EncSliceIntfV(v 
[]interface{}, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) { - fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { ee := e.e @@ -3167,8 +3192,39 @@ func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeString(c_UTF8, v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) { - fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { ee := e.e @@ -3189,8 +3245,39 @@ func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat32(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) { - fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { ee := e.e @@ -3211,8 +3298,39 @@ func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if 
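For context on the EncAsMapSlice*V variants being generated here: they serve slice types flagged with the MapBySlice marker interface (defined elsewhere in the package), which encodes an even-length slice as a map of alternating keys and values. A hedged sketch:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec" // assumed import path of the vendored package
)

// Pairs encodes as a map: even indexes are keys, odd indexes are values.
type Pairs []string

// MapBySlice is the marker method the encoder's ti.mbs flag keys off.
func (Pairs) MapBySlice() {}

func main() {
	var h codec.JsonHandle
	var out []byte

	if err := codec.NewEncoderBytes(&out, &h).Encode(Pairs{"region", "us-west", "host", "node-1"}); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // {"region":"us-west","host":"node-1"}

	// Odd-length input is rejected ("mapBySlice requires even slice length").
	err := codec.NewEncoderBytes(&out, &h).Encode(Pairs{"dangling-key"})
	fmt.Println(err)
}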
len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeFloat64(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) { - fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { ee := e.e @@ -3233,8 +3351,39 @@ func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) { - fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { ee := e.e @@ -3255,8 +3404,39 @@ func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) { - fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { ee := e.e @@ -3277,8 +3457,39 @@ func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + 
cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) { - fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { ee := e.e @@ -3299,8 +3510,39 @@ func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeUint(uint64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) { - fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { ee := e.e @@ -3321,8 +3563,39 @@ func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + e.encode(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) { - fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { ee := e.e @@ -3343,8 +3616,39 @@ func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) { - fastpathTV.EncSliceInt8V(rv.Interface().([]int8), 
fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { ee := e.e @@ -3365,8 +3669,39 @@ func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) { - fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { ee := e.e @@ -3387,8 +3722,39 @@ func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) { - fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { ee := e.e @@ -3409,8 +3775,39 @@ func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) { - fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e 
*Encoder) { ee := e.e @@ -3431,8 +3828,39 @@ func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeInt(int64(v2)) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) { - fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { ee := e.e @@ -3453,6 +3881,33 @@ func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) { } } +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + ee.EncodeBool(v2) + } + if cr != nil { + cr.sendContainerState(containerMapEnd) + } +} + func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) { fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e) } @@ -15489,9 +15944,6 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) { // -- -- fast path type switch func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { case []interface{}: @@ -17712,7 +18164,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -17771,7 +18223,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]interface{}, 1, 4) @@ -17846,7 +18298,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -17905,7 +18357,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]string, 1, 4) @@ -17979,7 +18431,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18038,7 +18490,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]float32, 1, 4) @@ -18112,7 +18564,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if 
containerLenS > 0 { @@ -18171,7 +18623,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]float64, 1, 4) @@ -18245,7 +18697,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18304,7 +18756,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint, 1, 4) @@ -18378,7 +18830,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18437,7 +18889,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint16, 1, 4) @@ -18511,7 +18963,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18570,7 +19022,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint32, 1, 4) @@ -18644,7 +19096,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18703,7 +19155,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uint64, 1, 4) @@ -18777,7 +19229,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18836,7 +19288,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool, changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]uintptr, 1, 4) @@ -18910,7 +19362,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -18969,7 +19421,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int, 1, 4) @@ -19043,7 +19495,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19102,7 +19554,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int8, 1, 4) @@ -19176,7 +19628,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19235,7 +19687,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int16, 1, 4) @@ -19309,7 +19761,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19368,7 +19820,7 @@ func 
(_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int32, 1, 4) @@ -19442,7 +19894,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19501,7 +19953,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]int64, 1, 4) @@ -19575,7 +20027,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -19634,7 +20086,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]bool, 1, 4) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl similarity index 94% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl rename to vendor/github.com/ugorji/go/codec/fast-path.go.tmpl index 58cc6df4c..c3ffdf93d 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl @@ -23,7 +23,7 @@ package codec // Currently support // - slice of all builtin types, // - map of all builtin types to string or interface value -// - symetrical maps of all builtin types (e.g. str-str, uint8-uint8) +// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8) // This should provide adequate "typical" implementations. // // Note that fast track decode functions must handle values for which an address cannot be obtained. 
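The EncAsMapSlice*V variants added to the generated file above (and to fast-path.go.tmpl below) back the new f.ti.mbs branch: a slice type that implements codec.MapBySlice is encoded as a map of its alternating key/value entries rather than as an array. A minimal sketch of how that path can be reached through the public API, assuming only the exported MapBySlice, JsonHandle and NewEncoderBytes names; the kvPairs type and the printed output are illustrative:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    // kvPairs opts in to map-by-slice encoding: an even-length slice of
    // alternating key/value entries is emitted as a map container, which is
    // the case the new EncAsMapSlice*V helpers handle on the fast path.
    type kvPairs []interface{}

    func (kvPairs) MapBySlice() {}

    func main() {
        var out []byte
        h := new(codec.JsonHandle)
        v := kvPairs{"name", "codis", "port", 19000}
        if err := codec.NewEncoderBytes(&out, h).Encode(v); err != nil {
            panic(err)
        }
        // should print a JSON object rather than an array,
        // e.g. {"name":"codis","port":19000}
        fmt.Println(string(out))
    }

Note the even-length guard in each new helper: an odd-length value is rejected with the "mapBySlice requires even slice length" error.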
@@ -38,6 +38,8 @@ import ( "sort" ) +const fastpathEnabled = true + const fastpathCheckNilFalse = false // for reflect const fastpathCheckNilTrue = true // for type switch @@ -81,9 +83,6 @@ var fastpathAV fastpathA // due to possible initialization loop error, make fastpath in an init() func init() { - if !fastpathEnabled { - return - } i := 0 fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) { xrt := reflect.TypeOf(v) @@ -106,9 +105,6 @@ func init() { // -- -- fast path type switch func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} @@ -126,9 +122,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}: @@ -144,9 +137,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { } func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if .MapKey }} case map[{{ .MapKey }}]{{ .Elem }}: @@ -165,7 +155,11 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} func (f *encFnInfo) {{ .MethodNamePfx "fastpathEnc" false }}R(rv reflect.Value) { - fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + if f.ti.mbs { + fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } else { + fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv.Interface().([]{{ .Elem }}), fastpathCheckNilFalse, f.e) + } } func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { ee := e.e @@ -182,6 +176,31 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, checkNil b if cr != nil { cr.sendContainerState(containerArrayEnd) }{{/* ee.EncodeEnd() */}} } +func (_ fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, checkNil bool, e *Encoder) { + ee := e.e + cr := e.cr + if checkNil && v == nil { + ee.EncodeNil() + return + } + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.EncodeMapStart(len(v) / 2) + for j, v2 := range v { + if cr != nil { + if j%2 == 0 { + cr.sendContainerState(containerMapKey) + } else { + cr.sendContainerState(containerMapValue) + } + } + {{ encmd .Elem "v2"}} + } + if cr != nil { cr.sendContainerState(containerMapEnd) } +} + {{end}}{{end}}{{end}} {{range .Values}}{{if not .Primitive}}{{if .MapKey }} @@ -257,9 +276,6 @@ func (_ fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Ele // -- -- fast path type switch func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { - if !fastpathEnabled { - return false - } switch v := iv.(type) { {{range .Values}}{{if not .Primitive}}{{if not .MapKey }} case []{{ .Elem }}:{{else}} @@ -328,7 +344,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil b changed = true } slh.End() - return + return v, changed } if containerLenS > 0 { @@ -391,7 +407,7 @@ func (_ fastpathT) {{ .MethodNamePfx "Dec" false }}V(v []{{ .Elem }}, checkNil 
b changed = true } slh.End() - return + return v, changed } if cap(v) == 0 { v = make([]{{ .Elem }}, 1, 4) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go rename to vendor/github.com/ugorji/go/codec/fast-path.not.go index d6f5f0c91..63e591145 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/fast-path.not.go +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -4,6 +4,8 @@ package codec import "reflect" +const fastpathEnabled = false + // The generated fast-path code is very large, and adds a few seconds to the build time. // This causes test execution, execution of small tools which use codec, etc // to take a long time. diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl similarity index 96% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl rename to vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl index 2caae5bfd..32df54144 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl @@ -1,6 +1,7 @@ {{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} if {{var "l"}} == 0 { {{if isSlice }}if {{var "v"}} == nil { {{var "v"}} = []{{ .Typ }}{} @@ -26,6 +27,8 @@ if {{var "l"}} == 0 { } {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) if {{var "l"}} > cap({{var "v"}}) { {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) {{ else }}{{if not .Immutable }} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl similarity index 100% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl rename to vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go similarity index 95% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go rename to vendor/github.com/ugorji/go/codec/gen-helper.generated.go index 22bce776b..eb0bdad35 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -1,4 +1,4 @@ -// //+build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. 
@@ -17,7 +17,7 @@ import ( // This file is used to generate helper code for codecgen. // The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. +// library users. They WILL change continuously and without notice. // // To help enforce this, we create an unexported type with exported members. // The only way to get the type is via the one exported type that we control (somewhat). @@ -83,6 +83,11 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { f.e.marshal(bs, fnerr, false, c_RAW) } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} + // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) TimeRtidIfBinc() uintptr { if _, ok := f.e.hh.(*BincHandle); ok { @@ -191,6 +196,11 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { } } +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} + // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperDecoder) TimeRtidIfBinc() uintptr { if _, ok := f.d.hh.(*BincHandle); ok { diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl rename to vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl index 31958574f..ad99f6671 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl +++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl @@ -1,4 +1,4 @@ -// //+build ignore +/* // +build ignore */ // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. @@ -17,7 +17,7 @@ import ( // This file is used to generate helper code for codecgen. // The values here i.e. genHelper(En|De)coder are not to be used directly by -// library users. They WILL change continously and without notice. +// library users. They WILL change continuously and without notice. // // To help enforce this, we create an unexported type with exported members. // The only way to get the type is via the one exported type that we control (somewhat). @@ -79,6 +79,10 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { f.e.marshal(bs, fnerr, false, c_RAW) } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { + f.e.raw(iv) +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* func (f genHelperEncoder) TimeRtidIfBinc() uintptr { if _, ok := f.e.hh.(*BincHandle); ok { return timeTypId @@ -172,6 +176,10 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { } } // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { + return f.d.raw() +} +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* func (f genHelperDecoder) TimeRtidIfBinc() uintptr { if _, ok := f.d.hh.(*BincHandle); ok { return timeTypId diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go rename to vendor/github.com/ugorji/go/codec/gen.generated.go index fb6f4b809..2ace97b78 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.generated.go +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -68,8 +68,9 @@ z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) const genDecListTmpl = ` {{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} -{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} if {{var "l"}} == 0 { {{if isSlice }}if {{var "v"}} == nil { {{var "v"}} = []{{ .Typ }}{} @@ -95,6 +96,8 @@ if {{var "l"}} == 0 { } {{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}} var {{var "rt"}} bool {{/* truncated */}} + _, _ = {{var "rl"}}, {{var "rt"}} + {{var "rr"}} = {{var "l"}} // len({{var "v"}}) if {{var "l"}} > cap({{var "v"}}) { {{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}}) {{ else }}{{if not .Immutable }} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go similarity index 90% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go rename to vendor/github.com/ugorji/go/codec/gen.go index a075e7c0d..c4944dbff 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen.go +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -12,7 +12,6 @@ import ( "io" "io/ioutil" "math/rand" - "os" "reflect" "regexp" "sort" @@ -21,11 +20,14 @@ import ( "sync" "text/template" "time" + "unicode" + "unicode/utf8" ) // --------------------------------------------------- // codecgen supports the full cycle of reflection-based codec: // - RawExt +// - Raw // - Builtins // - Extensions // - (Binary|Text|JSON)(Unm|M)arshal @@ -76,7 +78,7 @@ import ( // codecgen will panic if the file was generated with an old version of the library in use. // // Note: -// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. // This way, there isn't a function call overhead just to see that we should not enter a block of code. // GenVersion is the current version of codecgen. @@ -124,6 +126,7 @@ var ( genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice") genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) + genCheckVendor bool ) // genRunner holds some state used during a Gen run. @@ -162,6 +165,10 @@ type genRunner struct { // // Library users: *DO NOT USE IT DIRECTLY. 
IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.* func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + if len(typ) == 0 { return } @@ -199,7 +206,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.genRefPkgs(t) } if buildTags != "" { - x.line("//+build " + buildTags) + x.line("// +build " + buildTags) x.line("") } x.line(` @@ -266,6 +273,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.line("type " + x.hn + " struct{}") x.line("") + x.varsfxreset() x.line("func init() {") x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion) x.line("_, file, _, _ := runtime.Caller(0)") @@ -309,6 +317,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn for _, t := range x.ts { rtid := reflect.ValueOf(t).Pointer() // generate enc functions for all these slice/map types. + x.varsfxreset() x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) x.genRequiredMethodVars(true) switch t.Kind() { @@ -323,6 +332,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn x.line("") // generate dec functions for all these slice/map types. + x.varsfxreset() x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) x.genRequiredMethodVars(false) switch t.Kind() { @@ -377,7 +387,7 @@ func (x *genRunner) genRefPkgs(t reflect.Type) { x.imn[tpkg] = tpkg } else { x.imc++ - x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + tpkg[idx+1:] + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) } } } @@ -408,6 +418,10 @@ func (x *genRunner) varsfx() string { return strconv.FormatUint(x.c, 10) } +func (x *genRunner) varsfxreset() { + x.c = 0 +} + func (x *genRunner) out(s string) { if _, err := io.WriteString(x.w, s); err != nil { panic(err) @@ -494,6 +508,7 @@ func (x *genRunner) selfer(encode bool) { // always make decode use a pointer receiver, // and structs always use a ptr receiver (encode|decode) isptr := !encode || t.Kind() == reflect.Struct + x.varsfxreset() fnSigPfx := "func (x " if isptr { fnSigPfx += "*" @@ -566,9 +581,28 @@ func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) { } else { x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname) } - if _, ok := x.tm[t]; !ok { - x.tm[t] = struct{}{} - x.ts = append(x.ts, t) + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. 
a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) } } @@ -608,22 +642,33 @@ func (x *genRunner) encVar(varname string, t reflect.Type) { } -// enc will encode a variable (varname) of type T, -// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type *T (to prevent copying) +// enc will encode a variable (varname) of type t, +// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying) func (x *genRunner) enc(varname string, t reflect.Type) { - // varName here must be to a pointer to a struct/array, or to a value directly. rtid := reflect.ValueOf(t).Pointer() // We call CodecEncodeSelf if one of the following are honored: // - the type already implements Selfer, call that // - the type has a Selfer implementation just created, use that // - the type is in the list of the ones we will generate for, but it is not currently being generated + mi := x.varsfx() tptr := reflect.PtrTo(t) tk := t.Kind() if x.checkForSelfer(t, varname) { - if t.Implements(selferTyp) || (tptr.Implements(selferTyp) && (tk == reflect.Array || tk == reflect.Struct)) { - x.line(varname + ".CodecEncodeSelf(e)") - return + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } } if _, ok := x.te[rtid]; ok { @@ -651,14 +696,17 @@ func (x *genRunner) enc(varname string, t reflect.Type) { } // check if - // - type is RawExt + // - type is RawExt, Raw // - the type implements (Text|JSON|Binary)(Unm|M)arshal - mi := x.varsfx() x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi) x.linef("_ = %sm%s", genTempVarPfx, mi) x.line("if false {") //start if block defer func() { x.line("}") }() //end if block + if t == rawTyp { + x.linef("} else { z.EncRaw(%v)", varname) + return + } if t == rawExtTyp { x.linef("} else { r.EncodeRawExt(%v, e)", varname) return @@ -676,15 +724,31 @@ func (x *genRunner) enc(varname string, t reflect.Type) { // first check if extensions are configued, before doing the interface conversion x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname) } - if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { - x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) - } - if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { - x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) - } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { - x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T + if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if 
!%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } + } else { // varname is of type T + if t.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname) + } + if t.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname) + } else if t.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname) + } else if tptr.Implements(textMarshalerTyp) { + x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname) + } } - x.line("} else {") switch t.Kind() { @@ -922,6 +986,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { } func (x *genRunner) encListFallback(varname string, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname) + return + } i := x.varsfx() g := genTempVarPfx x.line("r.EncodeArrayStart(len(" + varname + "))") @@ -1020,6 +1092,8 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) { } } +// dec will decode a variable (varname) of type ptrTo(t). +// t is always a basetype (i.e. not of kind reflect.Ptr). func (x *genRunner) dec(varname string, t reflect.Type) { // assumptions: // - the varname is to a pointer already. No need to take address of it @@ -1056,7 +1130,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) { } // check if - // - type is RawExt + // - type is Raw, RawExt // - the type implements (Text|JSON|Binary)(Unm|M)arshal mi := x.varsfx() x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) @@ -1064,6 +1138,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) { x.line("if false {") //start if block defer func() { x.line("}") }() //end if block + if t == rawTyp { + x.linef("} else { *%v = z.DecRaw()", varname) + return + } if t == rawExtTyp { x.linef("} else { r.DecodeExt(%v, 0, nil)", varname) return @@ -1189,59 +1267,49 @@ func (x *genRunner) dec(varname string, t reflect.Type) { } func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) { - // We have to use the actual type name when doing a direct assignment. - // We don't have the luxury of casting the pointer to the underlying type. - // - // Consequently, in the situation of a - // type Message int32 - // var x Message - // var i int32 = 32 - // x = i // this will bomb - // x = Message(i) // this will work - // *((*int32)(&x)) = i // this will work - // - // Consequently, we replace: - // case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))") - // with: - // case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))") + // This should only be used for exact primitives (ie un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...) 
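The comment just above states the rationale for the decTryAssignPrimitive rework that follows: only unnamed primitive kinds may take the direct-assignment shortcut, because a named type can carry its own decoding behaviour (Selfer, text/binary/JSON unmarshalling) that the shortcut would silently bypass. A hedged illustration of the kind of type that motivates the t.Name() != "" guard; Port and its text unmarshaller are invented for the example:

    package main

    import (
        "fmt"
        "strconv"
    )

    // Port is a named integer type. If generated decode code assigned the raw
    // result of r.DecodeUint(16) straight into it, custom parsing like this
    // (or a CodecDecodeSelf / binary unmarshaller, if one existed) would be
    // skipped; routing named types through dec(...) keeps it honored.
    type Port uint16

    func (p *Port) UnmarshalText(b []byte) error {
        n, err := strconv.ParseUint(string(b), 10, 16)
        if err != nil {
            return fmt.Errorf("bad port %q: %v", b, err)
        }
        *p = Port(n)
        return nil
    }

    func main() {
        var p Port
        if err := p.UnmarshalText([]byte("6379")); err != nil {
            panic(err)
        }
        fmt.Println(p) // 6379
    }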
- xfn := func(t reflect.Type) string { - return x.genTypeNamePrim(t) + if t.Name() != "" { + tryAsPtr = true + return } + switch t.Kind() { case reflect.Int: - x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs) case reflect.Int8: - x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(8)", varname) case reflect.Int16: - x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(16)", varname) case reflect.Int32: - x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(32)", varname) case reflect.Int64: - x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t)) + x.linef("%s = r.DecodeInt(64)", varname) case reflect.Uint: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) case reflect.Uint8: - x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(8)", varname) case reflect.Uint16: - x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(16)", varname) case reflect.Uint32: - x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(32)", varname) case reflect.Uint64: - x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t)) + x.linef("%s = r.DecodeUint(64)", varname) case reflect.Uintptr: - x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs) + x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs) case reflect.Float32: - x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t)) + x.linef("%s = r.DecodeFloat(true)", varname) case reflect.Float64: - x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t)) + x.linef("%s = r.DecodeFloat(false)", varname) case reflect.Bool: - x.linef("%s = %s(r.DecodeBool())", varname, xfn(t)) + x.linef("%s = r.DecodeBool()", varname) case reflect.String: - x.linef("%s = %s(r.DecodeString())", varname, xfn(t)) + x.linef("%s = r.DecodeString()", varname) default: tryAsPtr = true } @@ -1249,6 +1317,14 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs } func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname) + return + } type tstruc struct { TempVar string Rand string @@ -1364,7 +1440,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt if si.i != -1 { t2 = t.Field(int(si.i)) } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. // t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname @@ -1452,7 +1528,7 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid if si.i != -1 { t2 = t.Field(int(si.i)) } else { - //we must accomodate anonymous fields, where the embedded field is a nil pointer in the value. + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. 
// t2 = t.FieldByIndex(si.is) t2typ := t varname3 := varname @@ -1569,8 +1645,6 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string { } -var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" - // genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise. // // This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, @@ -1592,6 +1666,26 @@ func genImportPath(t reflect.Type) (s string) { return } +// A go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + func genNonPtr(t reflect.Type) reflect.Type { for t.Kind() == reflect.Ptr { t = t.Elem() @@ -1601,7 +1695,7 @@ func genNonPtr(t reflect.Type) reflect.Type { func genTitleCaseName(s string) string { switch s { - case "interface{}": + case "interface{}", "interface {}": return "Intf" default: return strings.ToUpper(s[0:1]) + s[1:] @@ -1704,7 +1798,7 @@ func (x genInternal) FastpathLen() (l int) { func genInternalZeroValue(s string) string { switch s { - case "interface{}": + case "interface{}", "interface {}": return "nil" case "bool": return "false" @@ -1856,7 +1950,7 @@ func genInternalInit() { } var gt genInternal - // For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function for _, s := range types { gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. diff --git a/vendor/github.com/ugorji/go/codec/gen_15.go b/vendor/github.com/ugorji/go/codec/gen_15.go new file mode 100644 index 000000000..ab76c3102 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_15.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_16.go b/vendor/github.com/ugorji/go/codec/gen_16.go new file mode 100644 index 000000000..87c04e2e1 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_16.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6 + +package codec + +import "os" + +func init() { + genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" +} diff --git a/vendor/github.com/ugorji/go/codec/gen_17.go b/vendor/github.com/ugorji/go/codec/gen_17.go new file mode 100644 index 000000000..3881a43ce --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen_17.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build go1.7 + +package codec + +func init() { + genCheckVendor = true +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go similarity index 82% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go rename to vendor/github.com/ugorji/go/codec/helper.go index 560014ae3..8b94fc1e4 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper.go +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -38,10 +38,6 @@ package codec // a length prefix, or if it used explicit breaks. If length-prefixed, we assume that // it has to be binary, and we do not even try to read separators. // -// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length. -// It may suffer because we treat it like a text-based codec, and read separators. -// However, this read is a no-op and the cost is insignificant. -// // Philosophy // ------------ // On decode, this codec will update containers appropriately: @@ -137,17 +133,6 @@ const ( // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. recoverPanicToErr = true - // Fast path functions try to create a fast path encode or decode implementation - // for common maps and slices, by by-passing reflection altogether. - fastpathEnabled = true - - // if checkStructForEmptyValue, check structs fields to see if an empty value. - // This could be an expensive call, so possibly disable it. - checkStructForEmptyValue = false - - // if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue - derefForIsEmptyValue = false - // if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first. // Only concern is that, if the slice already contained some garbage, we will decode into that garbage. // The chances of this are slim, so leave this "optimization". @@ -155,8 +140,10 @@ const ( resetSliceElemToZeroValue bool = false ) -var oneByteArr = [1]byte{0} -var zeroByteSlice = oneByteArr[:0:0] +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) type charEncoding uint8 @@ -215,6 +202,41 @@ const ( containerArrayEnd ) +// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +type sfiIdx struct { + name string + index int +} + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// Java's PMD rules set TooManyFields threshold to 15. 
+const rgetPoolTArrayLen = 12 + +type rgetT struct { + fNames []string + encNames []string + etypes []uintptr + sfis []*structFieldInfo +} + +type rgetPoolT struct { + fNames [rgetPoolTArrayLen]string + encNames [rgetPoolTArrayLen]string + etypes [rgetPoolTArrayLen]uintptr + sfis [rgetPoolTArrayLen]*structFieldInfo + sfiidx [rgetPoolTArrayLen]sfiIdx +} + +var rgetPool = sync.Pool{ + New: func() interface{} { return new(rgetPoolT) }, +} + type containerStateRecv interface { sendContainerState(containerState) } @@ -240,6 +262,7 @@ var ( stringTyp = reflect.TypeOf("") timeTyp = reflect.TypeOf(time.Time{}) rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) uint8SliceTyp = reflect.TypeOf([]uint8(nil)) mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() @@ -257,6 +280,7 @@ var ( uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + rawTypId = reflect.ValueOf(rawTyp).Pointer() intfTypId = reflect.ValueOf(intfTyp).Pointer() timeTypId = reflect.ValueOf(timeTyp).Pointer() stringTypId = reflect.ValueOf(stringTyp).Pointer() @@ -337,6 +361,11 @@ type Handle interface { isBinary() bool } +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and store the raw bytes during decode. +// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set. +type Raw []byte + // RawExt represents raw unprocessed extension data. // Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag. // @@ -347,7 +376,7 @@ type RawExt struct { // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types Data []byte // Value represents the extension, if Data is nil. - // Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types. + // Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types. Value interface{} } @@ -525,7 +554,7 @@ func (o *extHandle) AddExt( func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { // o is a pointer, because we may need to initialize it if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { - err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T", reflect.Zero(rt).Interface()) return } @@ -568,7 +597,8 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn { } type structFieldInfo struct { - encName string // encode name + encName string // encode name + fieldName string // field name // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. @@ -714,6 +744,7 @@ type typeInfo struct { } func (ti *typeInfo) indexForEncName(name string) int { + // NOTE: name may be a stringView, so don't pass it to another function. 
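The Raw type introduced above ("raw formatted bytes", written out blindly on encode and captured verbatim on decode) is the user-facing side of the EncRaw/DecRaw codecgen helpers earlier in this diff. A minimal sketch of the intended use, assuming the exported Raw, JsonHandle, NewEncoderBytes and NewDecoderBytes API; the record struct and payload are illustrative, and the bytes placed in a Raw field must already be valid for the handle's format:

    package main

    import (
        "fmt"

        "github.com/ugorji/go/codec"
    )

    type record struct {
        Name string
        Meta codec.Raw // pre-encoded payload, copied to the output as-is
    }

    func main() {
        h := new(codec.JsonHandle)

        var out []byte
        in := record{Name: "codis", Meta: codec.Raw(`{"slots":1024}`)}
        if err := codec.NewEncoderBytes(&out, h).Encode(in); err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // e.g. {"Name":"codis","Meta":{"slots":1024}}

        var back record
        if err := codec.NewDecoderBytes(out, h).Decode(&back); err != nil {
            panic(err)
        }
        fmt.Println(string(back.Meta)) // raw bytes of the Meta value
    }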
//tisfi := ti.sfi const binarySearchThreshold = 16 if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { @@ -828,19 +859,19 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { } if rt.Kind() == reflect.Struct { - var siInfo *structFieldInfo + var omitEmpty bool if f, ok := rt.FieldByName(structInfoFieldName); ok { - siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) + siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag)) ti.toArray = siInfo.toArray + omitEmpty = siInfo.omitEmpty } - sfip := make([]*structFieldInfo, 0, rt.NumField()) - x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo) - - ti.sfip = make([]*structFieldInfo, len(sfip)) - ti.sfi = make([]*structFieldInfo, len(sfip)) - copy(ti.sfip, sfip) - sort.Sort(sfiSortedByEncName(sfip)) - copy(ti.sfi, sfip) + pi := rgetPool.Get() + pv := pi.(*rgetPoolT) + pv.etypes[0] = ti.baseId + vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]} + x.rget(rt, rtid, omitEmpty, nil, &vv) + ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0]) + rgetPool.Put(pi) } // sfi = sfip @@ -853,17 +884,30 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) { return } -func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, - sfi *[]*structFieldInfo, siInfo *structFieldInfo, +func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool, + indexstack []int, pv *rgetT, ) { - for j := 0; j < rt.NumField(); j++ { + // Read up fields and store how to access the value. + // + // It uses go's rules for message selectors, + // which say that the field with the shallowest depth is selected. + // + // Note: we consciously use slices, not a map, to simulate a set. + // Typically, types have < 16 fields, + // and iteration using equals is faster than maps there + +LOOP: + for j, jlen := 0, rt.NumField(); j < jlen; j++ { f := rt.Field(j) fkind := f.Type.Kind() // skip if a func type, or is unexported, or structTag value == "-" - if fkind == reflect.Func { - continue + switch fkind { + case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer: + continue LOOP } - // if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + + // if r1, _ := utf8.DecodeRuneInString(f.Name); + // r1 == utf8.RuneError || !unicode.IsUpper(r1) { if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded continue } @@ -872,7 +916,8 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st continue } var si *structFieldInfo - // if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it. + // if anonymous and no struct tag (or it's blank), + // and a struct (or pointer to struct), inline it. if f.Anonymous && fkind != reflect.Interface { doInline := stag == "" if !doInline { @@ -886,11 +931,31 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st ft = ft.Elem() } if ft.Kind() == reflect.Struct { - indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4) - copy(indexstack2, indexstack) - indexstack2[len(indexstack)] = j - // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) - x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo) + // if etypes contains this, don't call rget again (as fields are already seen here) + ftid := reflect.ValueOf(ft).Pointer() + // We cannot recurse forever, but we need to track other field depths. 
+ // So - we break if we see a type twice (not the first time). + // This should be sufficient to handle an embedded type that refers to its + // owning type, which then refers to its embedded type. + processIt := true + numk := 0 + for _, k := range pv.etypes { + if k == ftid { + numk++ + if numk == rgetMaxRecursion { + processIt = false + break + } + } + } + if processIt { + pv.etypes = append(pv.etypes, ftid) + indexstack2 := make([]int, len(indexstack)+1) + copy(indexstack2, indexstack) + indexstack2[len(indexstack)] = j + // indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + x.rget(ft, ftid, omitEmpty, indexstack2, pv) + } continue } } @@ -901,36 +966,86 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st continue } - // do not let fields with same name in embedded structs override field at higher level. - // this must be done after anonymous check, to allow anonymous field - // still include their child fields - if _, ok := fnameToHastag[f.Name]; ok { - continue - } if f.Name == "" { panic(noFieldNameToStructFieldInfoErr) } + + pv.fNames = append(pv.fNames, f.Name) + if si == nil { si = parseStructFieldInfo(f.Name, stag) } else if si.encName == "" { si.encName = f.Name } + si.fieldName = f.Name + + pv.encNames = append(pv.encNames, si.encName) + // si.ikind = int(f.Type.Kind()) if len(indexstack) == 0 { si.i = int16(j) } else { si.i = -1 - si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + si.is = make([]int, len(indexstack)+1) + copy(si.is, indexstack) + si.is[len(indexstack)] = j + // si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) } - if siInfo != nil { - if siInfo.omitEmpty { - si.omitEmpty = true + if omitEmpty { + si.omitEmpty = true + } + pv.sfis = append(pv.sfis, si) + } +} + +// resolves the struct field info got from a call to rget. +// Returns a trimmed, unsorted and sorted []*structFieldInfo. +func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) { + var n int + for i, v := range x { + xn := v.encName //TODO: fieldName or encName? use encName for now. + var found bool + for j, k := range pv { + if k.name == xn { + // one of them must be reset to nil, and the index updated appropriately to the other one + if len(v.is) == len(x[k.index].is) { + } else if len(v.is) < len(x[k.index].is) { + pv[j].index = i + if x[k.index] != nil { + x[k.index] = nil + n++ + } + } else { + if x[i] != nil { + x[i] = nil + n++ + } + } + found = true + break } } - *sfi = append(*sfi, si) - fnameToHastag[f.Name] = stag != "" + if !found { + pv = append(pv, sfiIdx{xn, i}) + } } + + // remove all the nils + y = make([]*structFieldInfo, len(x)-n) + n = 0 + for _, v := range x { + if v == nil { + continue + } + y[n] = v + n++ + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + return } func panicToErr(err *error) { @@ -1127,3 +1242,73 @@ type bytesISlice []bytesI func (p bytesISlice) Len() int { return len(p) } func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }() + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. 
+ if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + // defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }() + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go rename to vendor/github.com/ugorji/go/codec/helper_internal.go index dea981fbb..5d0727f77 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_internal.go +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -70,8 +70,8 @@ func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool { return false } -func isEmptyValue(v reflect.Value) bool { - return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue) +func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool { + return hIsEmptyValue(v, deref, checkStruct) } func pruneSignExt(v []byte, pos bool) (n int) { diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go rename to vendor/github.com/ugorji/go/codec/helper_not_unsafe.go index 7c2ffc0fd..8b06a0045 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_not_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go @@ -1,4 +1,4 @@ -//+build !unsafe +// +build !unsafe // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go similarity index 74% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go rename to vendor/github.com/ugorji/go/codec/helper_unsafe.go index 373b2b102..0f596c71a 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/helper_unsafe.go +++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go @@ -1,4 +1,4 @@ -//+build unsafe +// +build unsafe // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. 
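The set type completed above (add and remove over uintptr values) is the building block the encoder uses to track pointers that are currently being encoded, so a circular reference can be reported instead of recursing forever. The sketch below is only an illustration of that idea as a standalone program: node, walk and the map-based seen type are hypothetical stand-ins, since the patch keeps its set unexported inside the codec package. The patch's slice-backed version additionally reuses zeroed slots on remove, which keeps the common one-element case allocation-free.

package main

import (
	"errors"
	"fmt"
	"reflect"
)

// seen plays the role of the patch's set: pointers currently on the encode path.
type seen map[uintptr]bool

type node struct {
	Name string
	Next *node
}

// walk visits a linked structure and fails if it revisits a pointer,
// mirroring how a codec would detect a circular reference.
func walk(n *node, s seen) error {
	if n == nil {
		return nil
	}
	p := reflect.ValueOf(n).Pointer()
	if s[p] {
		return errors.New("circular reference detected")
	}
	s[p] = true        // set.add
	defer delete(s, p) // set.remove once this value is done
	return walk(n.Next, s)
}

func main() {
	a := &node{Name: "a"}
	a.Next = a // a cycle back to itself
	fmt.Println(walk(a, seen{})) // circular reference detected
}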
@@ -16,7 +16,7 @@ type unsafeString struct { Len int } -type unsafeBytes struct { +type unsafeSlice struct { Data uintptr Len int Cap int @@ -29,8 +29,10 @@ func stringView(v []byte) string { if len(v) == 0 { return "" } - x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)} - return *(*string)(unsafe.Pointer(&x)) + + bx := (*unsafeSlice)(unsafe.Pointer(&v)) + sx := unsafeString{bx.Data, bx.Len} + return *(*string)(unsafe.Pointer(&sx)) } // bytesView returns a view of the string as a []byte. @@ -40,6 +42,8 @@ func bytesView(v string) []byte { if len(v) == 0 { return zeroByteSlice } - x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)} - return *(*[]byte)(unsafe.Pointer(&x)) + + sx := (*unsafeString)(unsafe.Pointer(&v)) + bx := unsafeSlice{sx.Data, sx.Len, sx.Len} + return *(*[]byte)(unsafe.Pointer(&bx)) } diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go similarity index 85% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go rename to vendor/github.com/ugorji/go/codec/json.go index a18a5f706..5bb389628 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/json.go +++ b/vendor/github.com/ugorji/go/codec/json.go @@ -43,18 +43,23 @@ import ( //-------------------------------- -var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} +var ( + jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'} -var jsonFloat64Pow10 = [...]float64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, - 1e20, 1e21, 1e22, -} + jsonFloat64Pow10 = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, + } -var jsonUint64Pow10 = [...]uint64{ - 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} + jsonUint64Pow10 = [...]uint64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces string +) const ( // jsonUnreadAfterDecNum controls whether we unread after decoding a number. @@ -85,8 +90,23 @@ const ( jsonNumUintMaxVal = 1< 1<<53 || v < -(1<<53)) { + e.w.writen1('"') + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) + e.w.writen1('"') + return + } e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) } func (e *jsonEncDriver) EncodeUint(v uint64) { + if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 { + e.w.writen1('"') + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) + e.w.writen1('"') + return + } e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) } @@ -165,11 +251,17 @@ func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { } func (e *jsonEncDriver) EncodeArrayStart(length int) { + if e.d { + e.dl++ + } e.w.writen1('[') e.c = containerArrayStart } func (e *jsonEncDriver) EncodeMapStart(length int) { + if e.d { + e.dl++ + } e.w.writen1('{') e.c = containerMapStart } @@ -564,6 +656,11 @@ func (d *jsonDecDriver) decNum(storeBytes bool) { d.tok = b } b := d.tok + var str bool + if b == '"' { + str = true + b = d.r.readn1() + } if !(b == '+' || b == '-' || b == '.' 
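The helper_unsafe.go hunk above renames unsafeBytes to unsafeSlice and reworks stringView and bytesView so that, under the unsafe build tag, the returned string or byte slice aliases the caller's backing array instead of copying it; the safe (!unsafe) build keeps the ordinary copying conversions. A minimal sketch of the difference, written with only the safe standard conversions so it runs anywhere:

package main

import "fmt"

func main() {
	b := []byte("hello")

	// The ordinary conversion copies, so later writes to b do not affect s.
	s := string(b)
	b[0] = 'H'
	fmt.Println(s, string(b)) // hello Hello

	// stringView in the patch skips that copy by aliasing the same backing
	// array (only when built with -tags unsafe), so callers must not modify
	// or outlive the byte slice while the returned string is in use.
}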
|| (b >= '0' && b <= '9')) { d.d.errorf("json: decNum: got first char '%c'", b) return @@ -578,6 +675,10 @@ func (d *jsonDecDriver) decNum(storeBytes bool) { n.reset() d.bs = d.bs[:0] + if str && storeBytes { + d.bs = append(d.bs, '"') + } + // The format of a number is as below: // parsing: sign? digit* dot? digit* e? sign? digit* // states: 0 1* 2 3* 4 5* 6 7 @@ -668,6 +769,14 @@ LOOP: default: break LOOP } + case '"': + if str { + if storeBytes { + d.bs = append(d.bs, '"') + } + b, eof = r.readn1eof() + } + break LOOP default: break LOOP } @@ -822,6 +931,11 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [ if isstring { return d.bs } + // if appendStringAsBytes returned a zero-len slice, then treat as nil. + // This should only happen for null, and "". + if len(d.bs) == 0 { + return nil + } bs0 := d.bs slen := base64.StdEncoding.DecodedLen(len(bs0)) if slen <= cap(bs) { @@ -859,6 +973,14 @@ func (d *jsonDecDriver) appendStringAsBytes() { } d.tok = b } + + // handle null as a string + if d.tok == 'n' { + d.readStrIdx(10, 13) // ull + d.bs = d.bs[:0] + return + } + if d.tok != '"' { d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok) } @@ -1033,6 +1155,24 @@ type JsonHandle struct { // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. // If not configured, raw bytes are encoded to/from base64 text. RawBytesExt InterfaceExt + + // Indent indicates how a value is encoded. + // - If positive, indent by that number of spaces. + // - If negative, indent by that number of tabs. + Indent int8 + + // IntegerAsString controls how integers (signed and unsigned) are encoded. + // + // Per the JSON Spec, JSON numbers are 64-bit floating point numbers. + // Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision. + // This can be mitigated by configuring how to encode integers. + // + // IntegerAsString interpretes the following values: + // - if 'L', then encode integers > 2^53 as a json string. + // - if 'A', then encode all integers as a json string + // containing the exact integer representation as a decimal. 
+ // - else encode all integers as a json number (default) + IntegerAsString uint8 } func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { @@ -1040,26 +1180,48 @@ func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceE } func (h *JsonHandle) newEncDriver(e *Encoder) encDriver { - hd := jsonEncDriver{e: e, w: e.w, h: h} + hd := jsonEncDriver{e: e, h: h} hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt + + hd.reset() + return &hd } func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} - hd := jsonDecDriver{d: d, r: d.r, h: h} + hd := jsonDecDriver{d: d, h: h} hd.bs = hd.b[:0] - hd.se.i = h.RawBytesExt + hd.reset() return &hd } func (e *jsonEncDriver) reset() { e.w = e.e.w + e.se.i = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } + e.d, e.dt, e.dl, e.ds = false, false, 0, "" + e.c = 0 + if e.h.Indent > 0 { + e.d = true + e.ds = jsonSpaces[:e.h.Indent] + } else if e.h.Indent < 0 { + e.d = true + e.dt = true + e.ds = jsonTabs[:-(e.h.Indent)] + } } func (d *jsonDecDriver) reset() { d.r = d.d.r + d.se.i = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + d.n.reset() } var jsonEncodeTerminate = []byte{' '} diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go similarity index 99% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go rename to vendor/github.com/ugorji/go/codec/msgpack.go index 5eb4c9636..e79830b56 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/msgpack.go +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -374,7 +374,7 @@ func (d *msgpackDecDriver) DecodeNaked() { } if n.v == valueTypeUint && d.h.SignedInteger { n.v = valueTypeInt - n.i = int64(n.v) + n.i = int64(n.u) } return } @@ -561,6 +561,13 @@ func (d *msgpackDecDriver) readNextBd() { d.bdRead = true } +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *msgpackDecDriver) ContainerType() (vt valueType) { bd := d.bd if bd == mpNil { @@ -729,6 +736,7 @@ func (e *msgpackEncDriver) reset() { func (d *msgpackDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } //-------------------------------------------------- diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go b/vendor/github.com/ugorji/go/codec/noop.go similarity index 100% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go rename to vendor/github.com/ugorji/go/codec/noop.go diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go b/vendor/github.com/ugorji/go/codec/prebuild.go similarity index 100% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.go rename to vendor/github.com/ugorji/go/codec/prebuild.go diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh b/vendor/github.com/ugorji/go/codec/prebuild.sh old mode 100644 new mode 100755 similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh rename to vendor/github.com/ugorji/go/codec/prebuild.sh index 98f442487..909f4bb0f --- 
a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/prebuild.sh +++ b/vendor/github.com/ugorji/go/codec/prebuild.sh @@ -171,7 +171,7 @@ do 'xf') zforce=1;; 'xb') zbak=1;; 'xx') zexternal=1;; - *) echo "prebuild.sh accepts [-fb] only"; return 1;; + *) echo "prebuild.sh accepts [-fbx] only"; return 1;; esac done shift $((OPTIND-1)) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go similarity index 98% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go rename to vendor/github.com/ugorji/go/codec/rpc.go index dad53d0c6..8062bed31 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/rpc.go +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -25,7 +25,7 @@ type Rpc interface { } // RpcCodecBuffered allows access to the underlying bufio.Reader/Writer -// used by the rpc connection. It accomodates use-cases where the connection +// used by the rpc connection. It accommodates use-cases where the connection // should be used by rpc and non-rpc functions, e.g. streaming a file after // sending an rpc response. type RpcCodecBuffered interface { diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go similarity index 97% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go rename to vendor/github.com/ugorji/go/codec/simple.go index c15049650..d07208c87 100644 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/simple.go +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -166,6 +166,13 @@ func (d *simpleDecDriver) readNextBd() { d.bdRead = true } +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + func (d *simpleDecDriver) ContainerType() (vt valueType) { if d.bd == simpleVdNil { return valueTypeNil @@ -340,7 +347,7 @@ func (d *simpleDecDriver) decLen() int { } return int(ui) } - d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8) return -1 } @@ -474,7 +481,7 @@ func (d *simpleDecDriver) DecodeNaked() { // SimpleHandle is a Handle for a very simple encoding format. // // simple is a simplistic codec similar to binc, but not as compact. -// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - Encoding of a value is always preceded by the descriptor byte (bd) // - True, false, nil are encoded fully in 1 byte (the descriptor) // - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). // There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. 
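Returning to the json.go hunks above: Indent and IntegerAsString are plain fields on JsonHandle, so enabling pretty-printing or string-quoted large integers is just a matter of setting them before building an encoder. A usage sketch; the key names and values are arbitrary, and the exact whitespace of the indented output is not reproduced here:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle
	jh.Indent = 2            // positive: indent with that many spaces; negative would mean tabs
	jh.IntegerAsString = 'L' // only integers beyond 2^53 are emitted as quoted strings

	v := map[string]interface{}{
		"small": 3,
		"big":   uint64(1)<<60 + 3, // larger than 2^53, so 'L' quotes it
	}

	var out []byte
	if err := codec.NewEncoderBytes(&out, &jh).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // indented JSON, with "big" written as a quoted decimal string
}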
@@ -512,6 +519,7 @@ func (e *simpleEncDriver) reset() { func (d *simpleDecDriver) reset() { d.r = d.d.r + d.bd, d.bdRead = 0, false } var _ decDriver = (*simpleDecDriver)(nil) diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json similarity index 100% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test-cbor-goldens.json rename to vendor/github.com/ugorji/go/codec/test-cbor-goldens.json diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py old mode 100644 new mode 100755 similarity index 91% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py rename to vendor/github.com/ugorji/go/codec/test.py index dfe3b0c9a..c0ad20b34 --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py +++ b/vendor/github.com/ugorji/go/codec/test.py @@ -9,6 +9,8 @@ # sudo apt-get install python-pip # pip install --user msgpack-python msgpack-rpc-python cbor +# Ensure all "string" keys are utf strings (else encoded as bytes) + import cbor, msgpack, msgpackrpc, sys, os, threading def get_test_data_list(): @@ -26,35 +28,39 @@ def get_test_data_list(): -3232.0, -6464646464.0, 3232.0, + 6464.0, 6464646464.0, False, True, + u"null", None, u"someday", - u"", - u"bytestring", 1328176922000002000, + u"", -2206187877999998000, + u"bytestring", 270, + u"none", -2013855847999995777, #-6795364578871345152, ] l1 = [ { "true": True, "false": False }, - { "true": "True", + { "true": u"True", "false": False, "uint16(1616)": 1616 }, { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ], "int32":32323232, "bool": True, - "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", - "SHORT STRING": "1234567890" }, - { True: "true", 8: False, "false": 0 } + "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": u"1234567890" }, + { True: "true", 138: False, "false": 200 } ] l = [] l.extend(l0) l.append(l0) + l.append(1) l.extend(l1) return l diff --git a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh b/vendor/github.com/ugorji/go/codec/tests.sh old mode 100644 new mode 100755 similarity index 54% rename from vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh rename to vendor/github.com/ugorji/go/codec/tests.sh index b1602ea7e..342f336df --- a/vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/tests.sh +++ b/vendor/github.com/ugorji/go/codec/tests.sh @@ -6,6 +6,7 @@ _run() { # 1. VARIATIONS: regular (t), canonical (c), IO R/W (i), # binc-nosymbols (n), struct2array (s), intern string (e), + # json-indent (d), circular (l) # 2. MODE: reflection (r), external (x), codecgen (g), unsafe (u), notfastpath (f) # 3. 
OPTIONS: verbose (v), reset (z), must (m), # @@ -16,7 +17,7 @@ _run() { zargs="" local OPTIND OPTIND=1 - while getopts "xurtcinsvgzmef" flag + while getopts "_xurtcinsvgzmefdl" flag do case "x$flag" in 'xr') ;; @@ -27,6 +28,7 @@ _run() { 'xv') zargs="$zargs -tv" ;; 'xz') zargs="$zargs -tr" ;; 'xm') zargs="$zargs -tm" ;; + 'xl') zargs="$zargs -tl" ;; *) ;; esac done @@ -35,15 +37,19 @@ _run() { # echo ">>>>>>> TAGS: $ztags" OPTIND=1 - while getopts "xurtcinsvgzmef" flag + while getopts "_xurtcinsvgzmefdl" flag do case "x$flag" in 'xt') printf ">>>>>>> REGULAR : "; go test "-tags=$ztags" $zargs ; sleep 2 ;; 'xc') printf ">>>>>>> CANONICAL : "; go test "-tags=$ztags" $zargs -tc; sleep 2 ;; 'xi') printf ">>>>>>> I/O : "; go test "-tags=$ztags" $zargs -ti; sleep 2 ;; - 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" $zargs -tn; sleep 2 ;; + 'xn') printf ">>>>>>> NO_SYMBOLS : "; go test "-tags=$ztags" -run=Binc $zargs -tn; sleep 2 ;; 'xs') printf ">>>>>>> TO_ARRAY : "; go test "-tags=$ztags" $zargs -ts; sleep 2 ;; 'xe') printf ">>>>>>> INTERN : "; go test "-tags=$ztags" $zargs -te; sleep 2 ;; + 'xd') printf ">>>>>>> INDENT : "; + go test "-tags=$ztags" -run=JsonCodecsTable -td=-1 $zargs; + go test "-tags=$ztags" -run=JsonCodecsTable -td=8 $zargs; + sleep 2 ;; *) ;; esac done @@ -53,22 +59,44 @@ _run() { } # echo ">>>>>>> RUNNING VARIATIONS OF TESTS" -if [[ "x$@" = "x" ]]; then +if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then # All: r, x, g, gu - _run "-rtcinsm" # regular - _run "-rtcinsmz" # regular with reset - _run "-rtcinsmf" # regular with no fastpath (notfastpath) - _run "-xtcinsm" # external - _run "-gxtcinsm" # codecgen: requires external - _run "-gxutcinsm" # codecgen + unsafe + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset + _run "-_tcinsed_ml_f" # regular with no fastpath (notfastpath) + _run "-x_tcinsed_ml" # external + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe elif [[ "x$@" = "x-Z" ]]; then # Regular - _run "-rtcinsm" # regular - _run "-rtcinsmz" # regular with reset + _run "-_tcinsed_ml" # regular + _run "-_tcinsed_ml_z" # regular with reset elif [[ "x$@" = "x-F" ]]; then # regular with notfastpath - _run "-rtcinsmf" # regular - _run "-rtcinsmzf" # regular with reset + _run "-_tcinsed_ml_f" # regular + _run "-_tcinsed_ml_zf" # regular with reset +elif [[ "x$@" = "x-C" ]]; then + # codecgen + _run "-gx_tcinsed_ml" # codecgen: requires external + _run "-gxu_tcinsed_ml" # codecgen + unsafe +elif [[ "x$@" = "x-X" ]]; then + # external + _run "-x_tcinsed_ml" # external +elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then + cat <
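One behavioural fix from the msgpack.go hunk earlier deserves a usage note: with SignedInteger set, DecodeNaked now converts the decoded unsigned value (n.u) to int64 rather than the value-type tag (n.v), so naked decodes of unsigned wire integers yield the actual number. A sketch of the option in use; the value 1000 is chosen so that it is encoded with one of msgpack's unsigned descriptors (very small values become fixints and take a different path), and the import path is the upstream package this patch vendors:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var mh codec.MsgpackHandle
	mh.SignedInteger = true // surface unsigned wire integers as int64 on naked decodes

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &mh).Encode(uint64(1000)); err != nil {
		panic(err)
	}

	var v interface{}
	if err := codec.NewDecoderBytes(buf, &mh).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // int64 1000 with the fix; before it, the value-type tag leaked into the result
}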