From 6d871fb23ecd873cb10cdfc3a8dec5f50d2af8fa Mon Sep 17 00:00:00 2001
From: Ian Kent
Date: Sun, 16 Apr 2017 23:27:39 +0100
Subject: [PATCH] remove vendor directory

---
 vendor/github.com/mailhog/data/LICENSE.md | 21 -
 vendor/github.com/mailhog/data/README.md | 10 -
 vendor/github.com/mailhog/data/message.go | 323 --
 vendor/gopkg.in/mgo.v2/LICENSE | 25 -
 vendor/gopkg.in/mgo.v2/Makefile | 5 -
 vendor/gopkg.in/mgo.v2/README.md | 4 -
 vendor/gopkg.in/mgo.v2/auth.go | 467 --
 vendor/gopkg.in/mgo.v2/bson/LICENSE | 25 -
 vendor/gopkg.in/mgo.v2/bson/bson.go | 738 ---
 vendor/gopkg.in/mgo.v2/bson/decimal.go | 310 --
 vendor/gopkg.in/mgo.v2/bson/decode.go | 849 ---
 vendor/gopkg.in/mgo.v2/bson/encode.go | 514 --
 vendor/gopkg.in/mgo.v2/bson/json.go | 380 --
 vendor/gopkg.in/mgo.v2/bulk.go | 351 --
 vendor/gopkg.in/mgo.v2/cluster.go | 682 ---
 vendor/gopkg.in/mgo.v2/doc.go | 31 -
 vendor/gopkg.in/mgo.v2/gridfs.go | 761 ---
 vendor/gopkg.in/mgo.v2/internal/json/LICENSE | 27 -
 .../gopkg.in/mgo.v2/internal/json/decode.go | 1685 ------
 .../gopkg.in/mgo.v2/internal/json/encode.go | 1256 -----
 .../mgo.v2/internal/json/extension.go | 95 -
 vendor/gopkg.in/mgo.v2/internal/json/fold.go | 143 -
 .../gopkg.in/mgo.v2/internal/json/indent.go | 141 -
 .../gopkg.in/mgo.v2/internal/json/scanner.go | 697 ---
 .../gopkg.in/mgo.v2/internal/json/stream.go | 510 --
 vendor/gopkg.in/mgo.v2/internal/json/tags.go | 44 -
 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c | 77 -
 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go | 138 -
 .../mgo.v2/internal/sasl/sasl_windows.c | 122 -
 .../mgo.v2/internal/sasl/sasl_windows.go | 142 -
 .../mgo.v2/internal/sasl/sasl_windows.h | 7 -
 .../mgo.v2/internal/sasl/sspi_windows.c | 96 -
 .../mgo.v2/internal/sasl/sspi_windows.h | 70 -
 .../gopkg.in/mgo.v2/internal/scram/scram.go | 266 -
 vendor/gopkg.in/mgo.v2/log.go | 133 -
 vendor/gopkg.in/mgo.v2/queue.go | 91 -
 vendor/gopkg.in/mgo.v2/raceoff.go | 5 -
 vendor/gopkg.in/mgo.v2/raceon.go | 5 -
 vendor/gopkg.in/mgo.v2/saslimpl.go | 11 -
 vendor/gopkg.in/mgo.v2/saslstub.go | 11 -
 vendor/gopkg.in/mgo.v2/server.go | 463 --
 vendor/gopkg.in/mgo.v2/session.go | 4826 -----
 vendor/gopkg.in/mgo.v2/socket.go | 707 ---
 vendor/gopkg.in/mgo.v2/stats.go | 147 -
 vendor/vendor.json | 43 -
 45 files changed, 17454 deletions(-)
 delete mode 100644 vendor/github.com/mailhog/data/LICENSE.md
 delete mode 100644 vendor/github.com/mailhog/data/README.md
 delete mode 100644 vendor/github.com/mailhog/data/message.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/LICENSE
 delete mode 100644 vendor/gopkg.in/mgo.v2/Makefile
 delete mode 100644 vendor/gopkg.in/mgo.v2/README.md
 delete mode 100644 vendor/gopkg.in/mgo.v2/auth.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/LICENSE
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/bson.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/decimal.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/decode.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/encode.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bson/json.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/bulk.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/cluster.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/doc.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/gridfs.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/LICENSE
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/decode.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/encode.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/extension.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/fold.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/indent.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/scanner.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/stream.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/json/tags.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.c
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sasl.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
 delete mode 100644 vendor/gopkg.in/mgo.v2/internal/scram/scram.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/log.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/queue.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/raceoff.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/raceon.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/saslimpl.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/saslstub.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/server.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/session.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/socket.go
 delete mode 100644 vendor/gopkg.in/mgo.v2/stats.go
 delete mode 100644 vendor/vendor.json

diff --git a/vendor/github.com/mailhog/data/LICENSE.md b/vendor/github.com/mailhog/data/LICENSE.md
deleted file mode 100644
index 1f0d994..0000000
--- a/vendor/github.com/mailhog/data/LICENSE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Ian Kent
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/mailhog/data/README.md b/vendor/github.com/mailhog/data/README.md
deleted file mode 100644
index 3bf0037..0000000
--- a/vendor/github.com/mailhog/data/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-MailHog data library [![GoDoc](https://godoc.org/github.com/mailhog/data?status.svg)](https://godoc.org/github.com/mailhog/data) [![Build Status](https://travis-ci.org/mailhog/data.svg?branch=master)](https://travis-ci.org/mailhog/data)
-=========
-
-`github.com/mailhog/data` implements a data library
-
-### Licence
-
-Copyright © 2014-2015, Ian Kent (http://iankent.uk)
-
-Released under MIT license, see [LICENSE](LICENSE.md) for details.
diff --git a/vendor/github.com/mailhog/data/message.go b/vendor/github.com/mailhog/data/message.go
deleted file mode 100644
index 951f19a..0000000
--- a/vendor/github.com/mailhog/data/message.go
+++ /dev/null
@@ -1,323 +0,0 @@
-package data
-
-import (
-    "bytes"
-    "crypto/rand"
-    "encoding/base64"
-    "io"
-    "log"
-    "mime"
-    "strings"
-    "time"
-)
-
-// LogHandler is called for each log message. If nil, log messages will
-// be output using log.Printf instead.
-var LogHandler func(message string, args ...interface{})
-
-func logf(message string, args ...interface{}) {
-    if LogHandler != nil {
-        LogHandler(message, args...)
-    } else {
-        log.Printf(message, args...)
-    }
-}
-
-// MessageID represents the ID of an SMTP message including the hostname part
-type MessageID string
-
-// NewMessageID generates a new message ID
-func NewMessageID(hostname string) (MessageID, error) {
-    size := 32
-
-    rb := make([]byte, size)
-    _, err := rand.Read(rb)
-
-    if err != nil {
-        return MessageID(""), err
-    }
-
-    rs := base64.URLEncoding.EncodeToString(rb)
-
-    return MessageID(rs + "@" + hostname), nil
-}
-
-// Messages represents an array of Messages
-// - TODO is this even required?
-type Messages []Message
-
-// Message represents a parsed SMTP message
-type Message struct {
-    ID      MessageID
-    From    *Path
-    To      []*Path
-    Content *Content
-    Created time.Time
-    MIME    *MIMEBody // FIXME refactor to use Content.MIME
-    Raw     *SMTPMessage
-}
-
-// Path represents an SMTP forward-path or return-path
-type Path struct {
-    Relays  []string
-    Mailbox string
-    Domain  string
-    Params  string
-}
-
-// Content represents the body content of an SMTP message
-type Content struct {
-    Headers map[string][]string
-    Body    string
-    Size    int
-    MIME    *MIMEBody
-}
-
-// SMTPMessage represents a raw SMTP message
-type SMTPMessage struct {
-    From string
-    To   []string
-    Data string
-    Helo string
-}
-
-// MIMEBody represents a collection of MIME parts
-type MIMEBody struct {
-    Parts []*Content
-}
-
-// Parse converts a raw SMTP message to a parsed MIME message
-func (m *SMTPMessage) Parse(hostname string) *Message {
-    var arr []*Path
-    for _, path := range m.To {
-        arr = append(arr, PathFromString(path))
-    }
-
-    id, _ := NewMessageID(hostname)
-    msg := &Message{
-        ID:      id,
-        From:    PathFromString(m.From),
-        To:      arr,
-        Content: ContentFromString(m.Data),
-        Created: time.Now(),
-        Raw:     m,
-    }
-
-    if msg.Content.IsMIME() {
-        logf("Parsing MIME body")
-        msg.MIME = msg.Content.ParseMIMEBody()
-    }
-
-    // find headers
-    var hasMessageID bool
-    var receivedHeaderName string
-    var returnPathHeaderName string
-
-    for k := range msg.Content.Headers {
-        if strings.ToLower(k) == "message-id" {
-            hasMessageID = true
-            continue
-        }
-        if strings.ToLower(k) == "received" {
-            receivedHeaderName = k
-            continue
-        }
-        if strings.ToLower(k) == "return-path" {
-            returnPathHeaderName = k
-            continue
-        }
-    }
-
-    if !hasMessageID {
-        msg.Content.Headers["Message-ID"] = []string{string(id)}
-    }
-
-    if len(receivedHeaderName) > 0 {
-        msg.Content.Headers[receivedHeaderName] = append(msg.Content.Headers[receivedHeaderName], "from "+m.Helo+" by "+hostname+" (MailHog)\r\n id "+string(id)+"; "+time.Now().Format(time.RFC1123Z))
-    } else {
-        msg.Content.Headers["Received"] = []string{"from " + m.Helo + " by " + hostname + " (MailHog)\r\n id " + string(id) + "; " + time.Now().Format(time.RFC1123Z)}
-    }
-
-    if len(returnPathHeaderName) > 0 {
-        msg.Content.Headers[returnPathHeaderName] = append(msg.Content.Headers[returnPathHeaderName], "<"+m.From+">")
-    } else {
-        msg.Content.Headers["Return-Path"] = []string{"<" + m.From + ">"}
-    }
-
-    return msg
-}
-
-// Bytes returns an io.Reader containing the raw message data
-func (m *SMTPMessage) Bytes() io.Reader {
-    var b = new(bytes.Buffer)
-
-    b.WriteString("HELO:<" + m.Helo + ">\r\n")
-    b.WriteString("FROM:<" + m.From + ">\r\n")
-    for _, t := range m.To {
-        b.WriteString("TO:<" + t + ">\r\n")
-    }
-    b.WriteString("\r\n")
-    b.WriteString(m.Data)
-
-    return b
-}
-
-// FromBytes returns a SMTPMessage from raw message bytes (as output by SMTPMessage.Bytes())
-func FromBytes(b []byte) *SMTPMessage {
-    msg := &SMTPMessage{}
-    for _, l := range strings.Split(string(b), "\n") {
-        if strings.HasPrefix(l, "HELO:<") {
-            l = strings.TrimPrefix(l, "HELO:<")
-            l = strings.TrimSuffix(l, ">\r")
-            msg.Helo = l
-            continue
-        }
-        if strings.HasPrefix(l, "FROM:<") {
-            l = strings.TrimPrefix(l, "FROM:<")
-            l = strings.TrimSuffix(l, ">\r")
-            msg.From = l
-            continue
-        }
-        if strings.HasPrefix(l, "TO:<") {
-            l = strings.TrimPrefix(l, "TO:<")
-            l = strings.TrimSuffix(l, ">\r")
-            msg.To = append(msg.To, l)
-            continue
-        }
-        msg.Data += l + "\n"
-    }
-    return msg
-}
-
-// Bytes returns an io.Reader containing the raw message data
-func (m *Message) Bytes() io.Reader {
-    var b = new(bytes.Buffer)
-
-    for k, vs := range m.Content.Headers {
-        for _, v := range vs {
-            b.WriteString(k + ": " + v + "\r\n")
-        }
-    }
-
-    b.WriteString("\r\n")
-    b.WriteString(m.Content.Body)
-
-    return b
-}
-
-// IsMIME detects a valid MIME header
-func (content *Content) IsMIME() bool {
-    header, ok := content.Headers["Content-Type"]
-    if !ok {
-        return false
-    }
-    return strings.HasPrefix(header[0], "multipart/")
-}
-
-// ParseMIMEBody parses SMTP message content into multiple MIME parts
-func (content *Content) ParseMIMEBody() *MIMEBody {
-    var parts []*Content
-
-    if hdr, ok := content.Headers["Content-Type"]; ok {
-        if len(hdr) > 0 {
-            boundary := extractBoundary(hdr[0])
-            var p []string
-            if len(boundary) > 0 {
-                p = strings.Split(content.Body, "--"+boundary)
-                logf("Got boundary: %s", boundary)
-            } else {
-                logf("Boundary not found: %s", hdr[0])
-            }
-
-            for _, s := range p {
-                if len(s) > 0 {
-                    part := ContentFromString(strings.Trim(s, "\r\n"))
-                    if part.IsMIME() {
-                        logf("Parsing inner MIME body")
-                        part.MIME = part.ParseMIMEBody()
-                    }
-                    parts = append(parts, part)
-                }
-            }
-        }
-    }
-
-    return &MIMEBody{
-        Parts: parts,
-    }
-}
-
-// PathFromString parses a forward-path or reverse-path into its parts
-func PathFromString(path string) *Path {
-    var relays []string
-    email := path
-    if strings.Contains(path, ":") {
-        x := strings.SplitN(path, ":", 2)
-        r, e := x[0], x[1]
-        email = e
-        relays = strings.Split(r, ",")
-    }
-    mailbox, domain := "", ""
-    if strings.Contains(email, "@") {
-        x := strings.SplitN(email, "@", 2)
-        mailbox, domain = x[0], x[1]
-    } else {
-        mailbox = email
-    }
-
-    return &Path{
-        Relays:  relays,
-        Mailbox: mailbox,
-        Domain:  domain,
-        Params:  "", // FIXME?
-    }
-}
-
-// ContentFromString parses SMTP content into separate headers and body
-func ContentFromString(data string) *Content {
-    logf("Parsing Content from string: '%s'", data)
-    x := strings.SplitN(data, "\r\n\r\n", 2)
-    h := make(map[string][]string, 0)
-
-    // FIXME this fails if the message content has no headers - specifically,
-    // if it doesn't contain \r\n\r\n
-
-    if len(x) == 2 {
-        headers, body := x[0], x[1]
-        hdrs := strings.Split(headers, "\r\n")
-        var lastHdr = ""
-        for _, hdr := range hdrs {
-            if lastHdr != "" && (strings.HasPrefix(hdr, " ") || strings.HasPrefix(hdr, "\t")) {
-                h[lastHdr][len(h[lastHdr])-1] = h[lastHdr][len(h[lastHdr])-1] + hdr
-            } else if strings.Contains(hdr, ": ") {
-                y := strings.SplitN(hdr, ": ", 2)
-                key, value := y[0], y[1]
-                // TODO multiple header fields
-                h[key] = []string{value}
-                lastHdr = key
-            } else if len(hdr) > 0 {
-                logf("Found invalid header: '%s'", hdr)
-            }
-        }
-        return &Content{
-            Size:    len(data),
-            Headers: h,
-            Body:    body,
-        }
-    }
-    return &Content{
-        Size:    len(data),
-        Headers: h,
-        Body:    x[0],
-    }
-}
-
-// extractBoundary extract boundary string in contentType.
-// It returns empty string if no valid boundary found
-func extractBoundary(contentType string) string {
-    _, params, err := mime.ParseMediaType(contentType)
-    if err == nil {
-        return params["boundary"]
-    }
-    return ""
-}
diff --git a/vendor/gopkg.in/mgo.v2/LICENSE b/vendor/gopkg.in/mgo.v2/LICENSE
deleted file mode 100644
index 770c767..0000000
--- a/vendor/gopkg.in/mgo.v2/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-mgo - MongoDB driver for Go
-
-Copyright (c) 2010-2013 - Gustavo Niemeyer
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/mgo.v2/Makefile b/vendor/gopkg.in/mgo.v2/Makefile
deleted file mode 100644
index d1027d4..0000000
--- a/vendor/gopkg.in/mgo.v2/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-startdb:
-	@harness/setup.sh start
-
-stopdb:
-	@harness/setup.sh stop
diff --git a/vendor/gopkg.in/mgo.v2/README.md b/vendor/gopkg.in/mgo.v2/README.md
deleted file mode 100644
index f4e452c..0000000
--- a/vendor/gopkg.in/mgo.v2/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-The MongoDB driver for Go
--------------------------
-
-Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
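For orientation, the removed message.go above was the core of MailHog's message model. A minimal sketch of how it was typically driven, assuming the github.com/mailhog/data import path shown above; the hostname and SMTP transaction values are made up for illustration:

    package main

    import (
        "fmt"

        "github.com/mailhog/data"
    )

    func main() {
        // A raw SMTP transaction as MailHog's server would capture it; the
        // addresses and body are placeholder values.
        raw := &data.SMTPMessage{
            Helo: "client.example.com",
            From: "sender@example.com",
            To:   []string{"rcpt@example.com"},
            Data: "Subject: hi\r\n\r\nHello world",
        }

        // Parse assigns a Message-ID and adds the Received and Return-Path
        // headers, as implemented in message.go above.
        msg := raw.Parse("mailhog.example")

        fmt.Println(msg.ID)                             // "<base64>@mailhog.example"
        fmt.Println(msg.Content.Headers["Subject"])     // ["hi"]
        fmt.Println(msg.Content.Headers["Return-Path"]) // ["<sender@example.com>"]
        fmt.Println(msg.From.Mailbox, msg.From.Domain)  // sender example.com
    }

Parse is the entry point: it resolves addresses via PathFromString, splits headers from the body via ContentFromString, and stamps the Message-ID, Received, and Return-Path headers seen in the deleted code.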
diff --git a/vendor/gopkg.in/mgo.v2/auth.go b/vendor/gopkg.in/mgo.v2/auth.go deleted file mode 100644 index dc26e52..0000000 --- a/vendor/gopkg.in/mgo.v2/auth.go +++ /dev/null @@ -1,467 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "errors" - "fmt" - "sync" - - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/internal/scram" -) - -type authCmd struct { - Authenticate int - - Nonce string - User string - Key string -} - -type startSaslCmd struct { - StartSASL int `bson:"startSasl"` -} - -type authResult struct { - ErrMsg string - Ok bool -} - -type getNonceCmd struct { - GetNonce int -} - -type getNonceResult struct { - Nonce string - Err string "$err" - Code int -} - -type logoutCmd struct { - Logout int -} - -type saslCmd struct { - Start int `bson:"saslStart,omitempty"` - Continue int `bson:"saslContinue,omitempty"` - ConversationId int `bson:"conversationId,omitempty"` - Mechanism string `bson:"mechanism,omitempty"` - Payload []byte -} - -type saslResult struct { - Ok bool `bson:"ok"` - NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) 
- Done bool - - ConversationId int `bson:"conversationId"` - Payload []byte - ErrMsg string -} - -type saslStepper interface { - Step(serverData []byte) (clientData []byte, done bool, err error) - Close() -} - -func (socket *mongoSocket) getNonce() (nonce string, err error) { - socket.Lock() - for socket.cachedNonce == "" && socket.dead == nil { - debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) - socket.gotNonce.Wait() - } - if socket.cachedNonce == "mongos" { - socket.Unlock() - return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") - } - debugf("Socket %p to %s: got nonce", socket, socket.addr) - nonce, err = socket.cachedNonce, socket.dead - socket.cachedNonce = "" - socket.Unlock() - if err != nil { - nonce = "" - } - return -} - -func (socket *mongoSocket) resetNonce() { - debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) - op := &queryOp{} - op.query = &getNonceCmd{GetNonce: 1} - op.collection = "admin.$cmd" - op.limit = -1 - op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - if err != nil { - socket.kill(errors.New("getNonce: "+err.Error()), true) - return - } - result := &getNonceResult{} - err = bson.Unmarshal(docData, &result) - if err != nil { - socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) - return - } - debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) - if result.Code == 13390 { - // mongos doesn't yet support auth (see http://j.mp/mongos-auth) - result.Nonce = "mongos" - } else if result.Nonce == "" { - var msg string - if result.Err != "" { - msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) - } else { - msg = "Got an empty nonce" - } - socket.kill(errors.New(msg), true) - return - } - socket.Lock() - if socket.cachedNonce != "" { - socket.Unlock() - panic("resetNonce: nonce already cached") - } - socket.cachedNonce = result.Nonce - socket.gotNonce.Signal() - socket.Unlock() - } - err := socket.Query(op) - if err != nil { - socket.kill(errors.New("resetNonce: "+err.Error()), true) - } -} - -func (socket *mongoSocket) Login(cred Credential) error { - socket.Lock() - if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { - cred.Mechanism = "SCRAM-SHA-1" - } - for _, sockCred := range socket.creds { - if sockCred == cred { - debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) - socket.Unlock() - return nil - } - } - if socket.dropLogout(cred) { - debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - } - socket.Unlock() - - debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) - - var err error - switch cred.Mechanism { - case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. - err = socket.loginClassic(cred) - case "PLAIN": - err = socket.loginPlain(cred) - case "MONGODB-X509": - err = socket.loginX509(cred) - default: - // Try SASL for everything else, if it is available. 
- err = socket.loginSASL(cred) - } - - if err != nil { - debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) - } else { - debugf("Socket %p to %s: login successful", socket, socket.addr) - } - return err -} - -func (socket *mongoSocket) loginClassic(cred Credential) error { - // Note that this only works properly because this function is - // synchronous, which means the nonce won't get reset while we're - // using it and any other login requests will block waiting for a - // new nonce provided in the defer call below. - nonce, err := socket.getNonce() - if err != nil { - return err - } - defer socket.resetNonce() - - psum := md5.New() - psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) - - ksum := md5.New() - ksum.Write([]byte(nonce + cred.Username)) - ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) - - key := hex.EncodeToString(ksum.Sum(nil)) - - cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -type authX509Cmd struct { - Authenticate int - User string - Mechanism string -} - -func (socket *mongoSocket) loginX509(cred Credential) error { - cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -func (socket *mongoSocket) loginPlain(cred Credential) error { - cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} - res := authResult{} - return socket.loginRun(cred.Source, &cmd, &res, func() error { - if !res.Ok { - return errors.New(res.ErrMsg) - } - socket.Lock() - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - socket.Unlock() - return nil - }) -} - -func (socket *mongoSocket) loginSASL(cred Credential) error { - var sasl saslStepper - var err error - if cred.Mechanism == "SCRAM-SHA-1" { - // SCRAM is handled without external libraries. - sasl = saslNewScram(cred) - } else if len(cred.ServiceHost) > 0 { - sasl, err = saslNew(cred, cred.ServiceHost) - } else { - sasl, err = saslNew(cred, socket.Server().Addr) - } - if err != nil { - return err - } - defer sasl.Close() - - // The goal of this logic is to carry a locked socket until the - // local SASL step confirms the auth is valid; the socket needs to be - // locked so that concurrent action doesn't leave the socket in an - // auth state that doesn't reflect the operations that took place. - // As a simple case, imagine inverting login=>logout to logout=>login. - // - // The logic below works because the lock func isn't called concurrently. 
- locked := false - lock := func(b bool) { - if locked != b { - locked = b - if b { - socket.Lock() - } else { - socket.Unlock() - } - } - } - - lock(true) - defer lock(false) - - start := 1 - cmd := saslCmd{} - res := saslResult{} - for { - payload, done, err := sasl.Step(res.Payload) - if err != nil { - return err - } - if done && res.Done { - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - break - } - lock(false) - - cmd = saslCmd{ - Start: start, - Continue: 1 - start, - ConversationId: res.ConversationId, - Mechanism: cred.Mechanism, - Payload: payload, - } - start = 0 - err = socket.loginRun(cred.Source, &cmd, &res, func() error { - // See the comment on lock for why this is necessary. - lock(true) - if !res.Ok || res.NotOk { - return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) - } - return nil - }) - if err != nil { - return err - } - if done && res.Done { - socket.dropAuth(cred.Source) - socket.creds = append(socket.creds, cred) - break - } - } - - return nil -} - -func saslNewScram(cred Credential) *saslScram { - credsum := md5.New() - credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) - client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) - return &saslScram{cred: cred, client: client} -} - -type saslScram struct { - cred Credential - client *scram.Client -} - -func (s *saslScram) Close() {} - -func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { - more := s.client.Step(serverData) - return s.client.Out(), !more, s.client.Err() -} - -func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { - var mutex sync.Mutex - var replyErr error - mutex.Lock() - - op := queryOp{} - op.query = query - op.collection = db + ".$cmd" - op.limit = -1 - op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - defer mutex.Unlock() - - if err != nil { - replyErr = err - return - } - - err = bson.Unmarshal(docData, result) - if err != nil { - replyErr = err - } else { - // Must handle this within the read loop for the socket, so - // that concurrent login requests are properly ordered. - replyErr = f() - } - } - - err := socket.Query(&op) - if err != nil { - return err - } - mutex.Lock() // Wait. - return replyErr -} - -func (socket *mongoSocket) Logout(db string) { - socket.Lock() - cred, found := socket.dropAuth(db) - if found { - debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) - socket.logout = append(socket.logout, cred) - } - socket.Unlock() -} - -func (socket *mongoSocket) LogoutAll() { - socket.Lock() - if l := len(socket.creds); l > 0 { - debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) - socket.logout = append(socket.logout, socket.creds...) 
- socket.creds = socket.creds[0:0] - } - socket.Unlock() -} - -func (socket *mongoSocket) flushLogout() (ops []interface{}) { - socket.Lock() - if l := len(socket.logout); l > 0 { - debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) - for i := 0; i != l; i++ { - op := queryOp{} - op.query = &logoutCmd{1} - op.collection = socket.logout[i].Source + ".$cmd" - op.limit = -1 - ops = append(ops, &op) - } - socket.logout = socket.logout[0:0] - } - socket.Unlock() - return -} - -func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { - for i, sockCred := range socket.creds { - if sockCred.Source == db { - copy(socket.creds[i:], socket.creds[i+1:]) - socket.creds = socket.creds[:len(socket.creds)-1] - return sockCred, true - } - } - return cred, false -} - -func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { - for i, sockCred := range socket.logout { - if sockCred == cred { - copy(socket.logout[i:], socket.logout[i+1:]) - socket.logout = socket.logout[:len(socket.logout)-1] - return true - } - } - return false -} diff --git a/vendor/gopkg.in/mgo.v2/bson/LICENSE b/vendor/gopkg.in/mgo.v2/bson/LICENSE deleted file mode 100644 index 8903260..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -BSON library for Go - -Copyright (c) 2010-2012 - Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/mgo.v2/bson/bson.go b/vendor/gopkg.in/mgo.v2/bson/bson.go deleted file mode 100644 index 7fb7f8c..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/bson.go +++ /dev/null @@ -1,738 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package bson is an implementation of the BSON specification for Go: -// -// http://bsonspec.org -// -// It was created as part of the mgo MongoDB driver for Go, but is standalone -// and may be used on its own without the driver. -package bson - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "encoding/binary" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" -) - -// -------------------------------------------------------------------------- -// The public API. - -// A value implementing the bson.Getter interface will have its GetBSON -// method called when the given value has to be marshalled, and the result -// of this method will be marshaled in place of the actual object. -// -// If GetBSON returns return a non-nil error, the marshalling procedure -// will stop and error out with the provided value. -type Getter interface { - GetBSON() (interface{}, error) -} - -// A value implementing the bson.Setter interface will receive the BSON -// value via the SetBSON method during unmarshaling, and the object -// itself will not be changed as usual. -// -// If setting the value works, the method should return nil or alternatively -// bson.SetZero to set the respective field to its zero value (nil for -// pointer types). If SetBSON returns a value of type bson.TypeError, the -// BSON value will be omitted from a map or slice being decoded and the -// unmarshalling will continue. If it returns any other non-nil error, the -// unmarshalling procedure will stop and error out with the provided value. -// -// This interface is generally useful in pointer receivers, since the method -// will want to change the receiver. A type field that implements the Setter -// interface doesn't have to be a pointer, though. -// -// Unlike the usual behavior, unmarshalling onto a value that implements a -// Setter interface will NOT reset the value to its zero state. This allows -// the value to decide by itself how to be unmarshalled. -// -// For example: -// -// type MyString string -// -// func (s *MyString) SetBSON(raw bson.Raw) error { -// return raw.Unmarshal(s) -// } -// -type Setter interface { - SetBSON(raw Raw) error -} - -// SetZero may be returned from a SetBSON method to have the value set to -// its respective zero value. When used in pointer values, this will set the -// field to nil rather than to the pre-allocated value. -var SetZero = errors.New("set to zero") - -// M is a convenient alias for a map[string]interface{} map, useful for -// dealing with BSON in a native way. 
For instance: -// -// bson.M{"a": 1, "b": true} -// -// There's no special handling for this type in addition to what's done anyway -// for an equivalent map type. Elements in the map will be dumped in an -// undefined ordered. See also the bson.D type for an ordered alternative. -type M map[string]interface{} - -// D represents a BSON document containing ordered elements. For example: -// -// bson.D{{"a", 1}, {"b", true}} -// -// In some situations, such as when creating indexes for MongoDB, the order in -// which the elements are defined is important. If the order is not important, -// using a map is generally more comfortable. See bson.M and bson.RawD. -type D []DocElem - -// DocElem is an element of the bson.D document representation. -type DocElem struct { - Name string - Value interface{} -} - -// Map returns a map out of the ordered element name/value pairs in d. -func (d D) Map() (m M) { - m = make(M, len(d)) - for _, item := range d { - m[item.Name] = item.Value - } - return m -} - -// The Raw type represents raw unprocessed BSON documents and elements. -// Kind is the kind of element as defined per the BSON specification, and -// Data is the raw unprocessed data for the respective element. -// Using this type it is possible to unmarshal or marshal values partially. -// -// Relevant documentation: -// -// http://bsonspec.org/#/specification -// -type Raw struct { - Kind byte - Data []byte -} - -// RawD represents a BSON document containing raw unprocessed elements. -// This low-level representation may be useful when lazily processing -// documents of uncertain content, or when manipulating the raw content -// documents in general. -type RawD []RawDocElem - -// See the RawD type. -type RawDocElem struct { - Name string - Value Raw -} - -// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes -// long. MongoDB objects by default have such a property set in their "_id" -// property. -// -// http://www.mongodb.org/display/DOCS/Object+IDs -type ObjectId string - -// ObjectIdHex returns an ObjectId from the provided hex representation. -// Calling this function with an invalid hex representation will -// cause a runtime panic. See the IsObjectIdHex function. -func ObjectIdHex(s string) ObjectId { - d, err := hex.DecodeString(s) - if err != nil || len(d) != 12 { - panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s)) - } - return ObjectId(d) -} - -// IsObjectIdHex returns whether s is a valid hex representation of -// an ObjectId. See the ObjectIdHex function. -func IsObjectIdHex(s string) bool { - if len(s) != 24 { - return false - } - _, err := hex.DecodeString(s) - return err == nil -} - -// objectIdCounter is atomically incremented when generating a new ObjectId -// using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter uint32 = readRandomUint32() - -// readRandomUint32 returns a random objectIdCounter. -func readRandomUint32() uint32 { - var b [4]byte - _, err := io.ReadFull(rand.Reader, b[:]) - if err != nil { - panic(fmt.Errorf("cannot read random object id: %v", err)) - } - return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) -} - -// machineId stores machine id generated once and used in subsequent calls -// to NewObjectId function. -var machineId = readMachineId() -var processId = os.Getpid() - -// readMachineId generates and returns a machine id. -// If this function fails to get the hostname it will cause a runtime error. 
-func readMachineId() []byte { - var sum [3]byte - id := sum[:] - hostname, err1 := os.Hostname() - if err1 != nil { - _, err2 := io.ReadFull(rand.Reader, id) - if err2 != nil { - panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) - } - return id - } - hw := md5.New() - hw.Write([]byte(hostname)) - copy(id, hw.Sum(nil)) - return id -} - -// NewObjectId returns a new unique ObjectId. -func NewObjectId() ObjectId { - var b [12]byte - // Timestamp, 4 bytes, big endian - binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) - // Machine, first 3 bytes of md5(hostname) - b[4] = machineId[0] - b[5] = machineId[1] - b[6] = machineId[2] - // Pid, 2 bytes, specs don't specify endianness, but we use big endian. - b[7] = byte(processId >> 8) - b[8] = byte(processId) - // Increment, 3 bytes, big endian - i := atomic.AddUint32(&objectIdCounter, 1) - b[9] = byte(i >> 16) - b[10] = byte(i >> 8) - b[11] = byte(i) - return ObjectId(b[:]) -} - -// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled -// with the provided number of seconds from epoch UTC, and all other parts -// filled with zeroes. It's not safe to insert a document with an id generated -// by this method, it is useful only for queries to find documents with ids -// generated before or after the specified timestamp. -func NewObjectIdWithTime(t time.Time) ObjectId { - var b [12]byte - binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) - return ObjectId(string(b[:])) -} - -// String returns a hex string representation of the id. -// Example: ObjectIdHex("4d88e15b60f486e428412dc9"). -func (id ObjectId) String() string { - return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) -} - -// Hex returns a hex representation of the ObjectId. -func (id ObjectId) Hex() string { - return hex.EncodeToString([]byte(id)) -} - -// MarshalJSON turns a bson.ObjectId into a json.Marshaller. -func (id ObjectId) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%x"`, string(id))), nil -} - -var nullBytes = []byte("null") - -// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. -func (id *ObjectId) UnmarshalJSON(data []byte) error { - if len(data) > 0 && (data[0] == '{' || data[0] == 'O') { - var v struct { - Id json.RawMessage `json:"$oid"` - Func struct { - Id json.RawMessage - } `json:"$oidFunc"` - } - err := jdec(data, &v) - if err == nil { - if len(v.Id) > 0 { - data = []byte(v.Id) - } else { - data = []byte(v.Func.Id) - } - } - } - if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { - *id = "" - return nil - } - if len(data) != 26 || data[0] != '"' || data[25] != '"' { - return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data))) - } - var buf [12]byte - _, err := hex.Decode(buf[:], data[1:25]) - if err != nil { - return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err)) - } - *id = ObjectId(string(buf[:])) - return nil -} - -// MarshalText turns bson.ObjectId into an encoding.TextMarshaler. -func (id ObjectId) MarshalText() ([]byte, error) { - return []byte(fmt.Sprintf("%x", string(id))), nil -} - -// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler. 
-func (id *ObjectId) UnmarshalText(data []byte) error { - if len(data) == 1 && data[0] == ' ' || len(data) == 0 { - *id = "" - return nil - } - if len(data) != 24 { - return fmt.Errorf("invalid ObjectId: %s", data) - } - var buf [12]byte - _, err := hex.Decode(buf[:], data[:]) - if err != nil { - return fmt.Errorf("invalid ObjectId: %s (%s)", data, err) - } - *id = ObjectId(string(buf[:])) - return nil -} - -// Valid returns true if id is valid. A valid id must contain exactly 12 bytes. -func (id ObjectId) Valid() bool { - return len(id) == 12 -} - -// byteSlice returns byte slice of id from start to end. -// Calling this function with an invalid id will cause a runtime panic. -func (id ObjectId) byteSlice(start, end int) []byte { - if len(id) != 12 { - panic(fmt.Sprintf("invalid ObjectId: %q", string(id))) - } - return []byte(string(id)[start:end]) -} - -// Time returns the timestamp part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Time() time.Time { - // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. - secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) - return time.Unix(secs, 0) -} - -// Machine returns the 3-byte machine id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Machine() []byte { - return id.byteSlice(4, 7) -} - -// Pid returns the process id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Pid() uint16 { - return binary.BigEndian.Uint16(id.byteSlice(7, 9)) -} - -// Counter returns the incrementing value part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ObjectId) Counter() int32 { - b := id.byteSlice(9, 12) - // Counter is stored as big-endian 3-byte value - return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) -} - -// The Symbol type is similar to a string and is used in languages with a -// distinct symbol type. -type Symbol string - -// Now returns the current time with millisecond precision. MongoDB stores -// timestamps with the same precision, so a Time returned from this method -// will not change after a roundtrip to the database. That's the only reason -// why this function exists. Using the time.Now function also works fine -// otherwise. -func Now() time.Time { - return time.Unix(0, time.Now().UnixNano()/1e6*1e6) -} - -// MongoTimestamp is a special internal type used by MongoDB that for some -// strange reason has its own datatype defined in BSON. -type MongoTimestamp int64 - -type orderKey int64 - -// MaxKey is a special value that compares higher than all other possible BSON -// values in a MongoDB database. -var MaxKey = orderKey(1<<63 - 1) - -// MinKey is a special value that compares lower than all other possible BSON -// values in a MongoDB database. -var MinKey = orderKey(-1 << 63) - -type undefined struct{} - -// Undefined represents the undefined BSON value. -var Undefined undefined - -// Binary is a representation for non-standard binary values. Any kind should -// work, but the following are known as of this writing: -// -// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}. -// 0x01 - Function (!?) -// 0x02 - Obsolete generic. -// 0x03 - UUID -// 0x05 - MD5 -// 0x80 - User defined. -// -type Binary struct { - Kind byte - Data []byte -} - -// RegEx represents a regular expression. 
The Options field may contain -// individual characters defining the way in which the pattern should be -// applied, and must be sorted. Valid options as of this writing are 'i' for -// case insensitive matching, 'm' for multi-line matching, 'x' for verbose -// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all -// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match -// unicode. The value of the Options parameter is not verified before being -// marshaled into the BSON format. -type RegEx struct { - Pattern string - Options string -} - -// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it -// will be marshaled as a mapping from identifiers to values that may be -// used when evaluating the provided Code. -type JavaScript struct { - Code string - Scope interface{} -} - -// DBPointer refers to a document id in a namespace. -// -// This type is deprecated in the BSON specification and should not be used -// except for backwards compatibility with ancient applications. -type DBPointer struct { - Namespace string - Id ObjectId -} - -const initialBufferSize = 64 - -func handleErr(err *error) { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } else if _, ok := r.(externalPanic); ok { - panic(r) - } else if s, ok := r.(string); ok { - *err = errors.New(s) - } else if e, ok := r.(error); ok { - *err = e - } else { - panic(r) - } - } -} - -// Marshal serializes the in value, which may be a map or a struct value. -// In the case of struct values, only exported fields will be serialized, -// and the order of serialized fields will match that of the struct itself. -// The lowercased field name is used as the key for each exported field, -// but this behavior may be changed using the respective field tag. -// The tag may also contain flags to tweak the marshalling behavior for -// the field. The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) bson:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// -// minsize Marshal an int64 value as an int32, if that's feasible -// while preserving the numeric value. -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the bson keys of other struct fields. -// -// Some examples: -// -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := &encoder{make([]byte, 0, initialBufferSize)} - e.addDoc(reflect.ValueOf(in)) - return e.out, nil -} - -// Unmarshal deserializes data from in into the out value. The out value -// must be a map, a pointer to a struct, or a pointer to a bson.D value. -// In the case of struct values, only exported fields will be deserialized. -// The lowercased field name is used as the key for each exported field, -// but this behavior may be changed using the respective field tag. -// The tag may also contain flags to tweak the marshalling behavior for -// the field. The tag formats accepted are: -// -// "[][,[,]]" -// -// `(...) 
bson:"[][,[,]]" (...)` -// -// The following flags are currently supported during unmarshal (see the -// Marshal method for other flags): -// -// inline Inline the field, which must be a struct or a map. -// Inlined structs are handled as if its fields were part -// of the outer struct. An inlined map causes keys that do -// not match any other struct field to be inserted in the -// map rather than being discarded as usual. -// -// The target field or element types of out may not necessarily match -// the BSON values of the provided data. The following conversions are -// made automatically: -// -// - Numeric types are converted if at least the integer part of the -// value would be preserved correctly -// - Bools are converted to numeric types as 1 or 0 -// - Numeric types are converted to bools as true if not 0 or false otherwise -// - Binary and string BSON data is converted to a string, array or byte slice -// -// If the value would not fit the type and cannot be converted, it's -// silently skipped. -// -// Pointer values are initialized when necessary. -func Unmarshal(in []byte, out interface{}) (err error) { - if raw, ok := out.(*Raw); ok { - raw.Kind = 3 - raw.Data = in - return nil - } - defer handleErr(&err) - v := reflect.ValueOf(out) - switch v.Kind() { - case reflect.Ptr: - fallthrough - case reflect.Map: - d := newDecoder(in) - d.readDocTo(v) - case reflect.Struct: - return errors.New("Unmarshal can't deal with struct values. Use a pointer.") - default: - return errors.New("Unmarshal needs a map or a pointer to a struct.") - } - return nil -} - -// Unmarshal deserializes raw into the out value. If the out value type -// is not compatible with raw, a *bson.TypeError is returned. -// -// See the Unmarshal function documentation for more details on the -// unmarshalling process. -func (raw Raw) Unmarshal(out interface{}) (err error) { - defer handleErr(&err) - v := reflect.ValueOf(out) - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - fallthrough - case reflect.Map: - d := newDecoder(raw.Data) - good := d.readElemTo(v, raw.Kind) - if !good { - return &TypeError{v.Type(), raw.Kind} - } - case reflect.Struct: - return errors.New("Raw Unmarshal can't deal with struct values. 
Use a pointer.") - default: - return errors.New("Raw Unmarshal needs a map or a valid pointer.") - } - return nil -} - -type TypeError struct { - Type reflect.Type - Kind byte -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - InlineMap int - Zero reflect.Value -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - MinSize bool - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var structMapMutex sync.RWMutex - -type externalPanic string - -func (e externalPanic) String() string { - return string(e) -} - -func getStructInfo(st reflect.Type) (*structInfo, error) { - structMapMutex.RLock() - sinfo, found := structMap[st] - structMapMutex.RUnlock() - if found { - return sinfo, nil - } - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("bson") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "minsize": - info.MinSize = true - case "inline": - inline = true - default: - msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) - panic(externalPanic(msg)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("Multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct: - sinfo, err := getStructInfo(field.Type) - if err != nil { - return nil, err - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) 
- } - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - default: - panic("Option ,inline needs a struct value or map field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "Duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - sinfo = &structInfo{ - fieldsMap, - fieldsList, - inlineMap, - reflect.New(st).Elem(), - } - structMapMutex.Lock() - structMap[st] = sinfo - structMapMutex.Unlock() - return sinfo, nil -} diff --git a/vendor/gopkg.in/mgo.v2/bson/decimal.go b/vendor/gopkg.in/mgo.v2/bson/decimal.go deleted file mode 100644 index 3d2f700..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/decimal.go +++ /dev/null @@ -1,310 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package bson - -import ( - "fmt" - "strconv" - "strings" -) - -// Decimal128 holds decimal128 BSON values. -type Decimal128 struct { - h, l uint64 -} - -func (d Decimal128) String() string { - var pos int // positive sign - var e int // exponent - var h, l uint64 // significand high/low - - if d.h>>63&1 == 0 { - pos = 1 - } - - switch d.h >> 58 & (1<<5 - 1) { - case 0x1F: - return "NaN" - case 0x1E: - return "-Inf"[pos:] - } - - l = d.l - if d.h>>61&3 == 3 { - // Bits: 1*sign 2*ignored 14*exponent 111*significand. - // Implicit 0b100 prefix in significand. - e = int(d.h>>47&(1<<14-1)) - 6176 - //h = 4<<47 | d.h&(1<<47-1) - // Spec says all of these values are out of range. - h, l = 0, 0 - } else { - // Bits: 1*sign 14*exponent 113*significand - e = int(d.h>>49&(1<<14-1)) - 6176 - h = d.h & (1<<49 - 1) - } - - // Would be handled by the logic below, but that's trivial and common. - if h == 0 && l == 0 && e == 0 { - return "-0"[pos:] - } - - var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. 
- var last = len(repr) - var i = len(repr) - var dot = len(repr) + e - var rem uint32 -Loop: - for d9 := 0; d9 < 5; d9++ { - h, l, rem = divmod(h, l, 1e9) - for d1 := 0; d1 < 9; d1++ { - // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. - if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) { - e += len(repr) - i - i-- - repr[i] = '.' - last = i - 1 - dot = len(repr) // Unmark. - } - c := '0' + byte(rem%10) - rem /= 10 - i-- - repr[i] = c - // Handle "0E+3", "1E+3", etc. - if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) { - last = i - break Loop - } - if c != '0' { - last = i - } - // Break early. Works without it, but why. - if dot > i && l == 0 && h == 0 && rem == 0 { - break Loop - } - } - } - repr[last-1] = '-' - last-- - - if e > 0 { - return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) - } - if e < 0 { - return string(repr[last+pos:]) + "E" + strconv.Itoa(e) - } - return string(repr[last+pos:]) -} - -func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { - div64 := uint64(div) - a := h >> 32 - aq := a / div64 - ar := a % div64 - b := ar<<32 + h&(1<<32-1) - bq := b / div64 - br := b % div64 - c := br<<32 + l>>32 - cq := c / div64 - cr := c % div64 - d := cr<<32 + l&(1<<32-1) - dq := d / div64 - dr := d % div64 - return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) -} - -var dNaN = Decimal128{0x1F << 58, 0} -var dPosInf = Decimal128{0x1E << 58, 0} -var dNegInf = Decimal128{0x3E << 58, 0} - -func dErr(s string) (Decimal128, error) { - return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s) -} - -func ParseDecimal128(s string) (Decimal128, error) { - orig := s - if s == "" { - return dErr(orig) - } - neg := s[0] == '-' - if neg || s[0] == '+' { - s = s[1:] - } - - if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') { - if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { - return dNaN, nil - } - if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { - if neg { - return dNegInf, nil - } - return dPosInf, nil - } - return dErr(orig) - } - - var h, l uint64 - var e int - - var add, ovr uint32 - var mul uint32 = 1 - var dot = -1 - var digits = 0 - var i = 0 - for i < len(s) { - c := s[i] - if mul == 1e9 { - h, l, ovr = muladd(h, l, mul, add) - mul, add = 1, 0 - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if c >= '0' && c <= '9' { - i++ - if c > '0' || digits > 0 { - digits++ - } - if digits > 34 { - if c == '0' { - // Exact rounding. - e++ - continue - } - return dErr(orig) - } - mul *= 10 - add *= 10 - add += uint32(c - '0') - continue - } - if c == '.' { - i++ - if dot >= 0 || i == 1 && len(s) == 1 { - return dErr(orig) - } - if i == len(s) { - break - } - if s[i] < '0' || s[i] > '9' || e > 0 { - return dErr(orig) - } - dot = i - continue - } - break - } - if i == 0 { - return dErr(orig) - } - if mul > 1 { - h, l, ovr = muladd(h, l, mul, add) - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if dot >= 0 { - e += dot - i - } - if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { - i++ - eneg := s[i] == '-' - if eneg || s[i] == '+' { - i++ - if i == len(s) { - return dErr(orig) - } - } - n := 0 - for i < len(s) && n < 1e4 { - c := s[i] - i++ - if c < '0' || c > '9' { - return dErr(orig) - } - n *= 10 - n += int(c - '0') - } - if eneg { - n = -n - } - e += n - for e < -6176 { - // Subnormal. 
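-			// The exponent is below the representable minimum, so scale the
-			// significand down by powers of ten; a nonzero remainder would
-			// mean losing digits, and parsing fails instead.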
- var div uint32 = 1 - for div < 1e9 && e < -6176 { - div *= 10 - e++ - } - var rem uint32 - h, l, rem = divmod(h, l, div) - if rem > 0 { - return dErr(orig) - } - } - for e > 6111 { - // Clamped. - var mul uint32 = 1 - for mul < 1e9 && e > 6111 { - mul *= 10 - e-- - } - h, l, ovr = muladd(h, l, mul, 0) - if ovr > 0 || h&((1<<15-1)<<49) > 0 { - return dErr(orig) - } - } - if e < -6176 || e > 6111 { - return dErr(orig) - } - } - - if i < len(s) { - return dErr(orig) - } - - h |= uint64(e+6176) & uint64(1<<14-1) << 49 - if neg { - h |= 1 << 63 - } - return Decimal128{h, l}, nil -} - -func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) { - mul64 := uint64(mul) - a := mul64 * (l & (1<<32 - 1)) - b := a>>32 + mul64*(l>>32) - c := b>>32 + mul64*(h&(1<<32-1)) - d := c>>32 + mul64*(h>>32) - - a = a&(1<<32-1) + uint64(add) - b = b&(1<<32-1) + a>>32 - c = c&(1<<32-1) + b>>32 - d = d&(1<<32-1) + c>>32 - - return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32) -} diff --git a/vendor/gopkg.in/mgo.v2/bson/decode.go b/vendor/gopkg.in/mgo.v2/bson/decode.go deleted file mode 100644 index 7c2d841..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/decode.go +++ /dev/null @@ -1,849 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson - -import ( - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "sync" - "time" -) - -type decoder struct { - in []byte - i int - docType reflect.Type -} - -var typeM = reflect.TypeOf(M{}) - -func newDecoder(in []byte) *decoder { - return &decoder{in, 0, typeM} -} - -// -------------------------------------------------------------------------- -// Some helper functions. - -func corrupted() { - panic("Document is corrupted") -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -// -------------------------------------------------------------------------- -// Unmarshaling of documents. 
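-// Types can take over their own decoding by implementing the Setter
-// interface; the machinery below resolves and caches which style applies
-// to each type. A minimal sketch (MyString is illustrative, not part of
-// this package):
-//
-//     type MyString string
-//
-//     func (s *MyString) SetBSON(raw Raw) error {
-//             var v string
-//             if err := raw.Unmarshal(&v); err != nil {
-//                     return err
-//             }
-//             *s = MyString(v)
-//             return nil
-//     }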
- -const ( - setterUnknown = iota - setterNone - setterType - setterAddr -) - -var setterStyles map[reflect.Type]int -var setterIface reflect.Type -var setterMutex sync.RWMutex - -func init() { - var iface Setter - setterIface = reflect.TypeOf(&iface).Elem() - setterStyles = make(map[reflect.Type]int) -} - -func setterStyle(outt reflect.Type) int { - setterMutex.RLock() - style := setterStyles[outt] - setterMutex.RUnlock() - if style == setterUnknown { - setterMutex.Lock() - defer setterMutex.Unlock() - if outt.Implements(setterIface) { - setterStyles[outt] = setterType - } else if reflect.PtrTo(outt).Implements(setterIface) { - setterStyles[outt] = setterAddr - } else { - setterStyles[outt] = setterNone - } - style = setterStyles[outt] - } - return style -} - -func getSetter(outt reflect.Type, out reflect.Value) Setter { - style := setterStyle(outt) - if style == setterNone { - return nil - } - if style == setterAddr { - if !out.CanAddr() { - return nil - } - out = out.Addr() - } else if outt.Kind() == reflect.Ptr && out.IsNil() { - out.Set(reflect.New(outt.Elem())) - } - return out.Interface().(Setter) -} - -func clearMap(m reflect.Value) { - var none reflect.Value - for _, k := range m.MapKeys() { - m.SetMapIndex(k, none) - } -} - -func (d *decoder) readDocTo(out reflect.Value) { - var elemType reflect.Type - outt := out.Type() - outk := outt.Kind() - - for { - if outk == reflect.Ptr && out.IsNil() { - out.Set(reflect.New(outt.Elem())) - } - if setter := getSetter(outt, out); setter != nil { - var raw Raw - d.readDocTo(reflect.ValueOf(&raw)) - err := setter.SetBSON(raw) - if _, ok := err.(*TypeError); err != nil && !ok { - panic(err) - } - return - } - if outk == reflect.Ptr { - out = out.Elem() - outt = out.Type() - outk = out.Kind() - continue - } - break - } - - var fieldsMap map[string]fieldInfo - var inlineMap reflect.Value - start := d.i - - origout := out - if outk == reflect.Interface { - if d.docType.Kind() == reflect.Map { - mv := reflect.MakeMap(d.docType) - out.Set(mv) - out = mv - } else { - dv := reflect.New(d.docType).Elem() - out.Set(dv) - out = dv - } - outt = out.Type() - outk = outt.Kind() - } - - docType := d.docType - keyType := typeString - convertKey := false - switch outk { - case reflect.Map: - keyType = outt.Key() - if keyType.Kind() != reflect.String { - panic("BSON map must have string keys. 
Got: " + outt.String()) - } - if keyType != typeString { - convertKey = true - } - elemType = outt.Elem() - if elemType == typeIface { - d.docType = outt - } - if out.IsNil() { - out.Set(reflect.MakeMap(out.Type())) - } else if out.Len() > 0 { - clearMap(out) - } - case reflect.Struct: - if outt != typeRaw { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - fieldsMap = sinfo.FieldsMap - out.Set(sinfo.Zero) - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - if !inlineMap.IsNil() && inlineMap.Len() > 0 { - clearMap(inlineMap) - } - elemType = inlineMap.Type().Elem() - if elemType == typeIface { - d.docType = inlineMap.Type() - } - } - } - case reflect.Slice: - switch outt.Elem() { - case typeDocElem: - origout.Set(d.readDocElems(outt)) - return - case typeRawDocElem: - origout.Set(d.readRawDocElems(outt)) - return - } - fallthrough - default: - panic("Unsupported document type for unmarshalling: " + out.Type().String()) - } - - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - name := d.readCStr() - if d.i >= end { - corrupted() - } - - switch outk { - case reflect.Map: - e := reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - k := reflect.ValueOf(name) - if convertKey { - k = k.Convert(keyType) - } - out.SetMapIndex(k, e) - } - case reflect.Struct: - if outt == typeRaw { - d.dropElem(kind) - } else { - if info, ok := fieldsMap[name]; ok { - if info.Inline == nil { - d.readElemTo(out.Field(info.Num), kind) - } else { - d.readElemTo(out.FieldByIndex(info.Inline), kind) - } - } else if inlineMap.IsValid() { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - e := reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - inlineMap.SetMapIndex(reflect.ValueOf(name), e) - } - } else { - d.dropElem(kind) - } - } - case reflect.Slice: - } - - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } - d.docType = docType - - if outt == typeRaw { - out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]})) - } -} - -func (d *decoder) readArrayDocTo(out reflect.Value) { - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - i := 0 - l := out.Len() - for d.in[d.i] != '\x00' { - if i >= l { - panic("Length mismatch on array field") - } - kind := d.readByte() - for d.i < end && d.in[d.i] != '\x00' { - d.i++ - } - if d.i >= end { - corrupted() - } - d.i++ - d.readElemTo(out.Index(i), kind) - if d.i >= end { - corrupted() - } - i++ - } - if i != l { - panic("Length mismatch on array field") - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } -} - -func (d *decoder) readSliceDoc(t reflect.Type) interface{} { - tmp := make([]reflect.Value, 0, 8) - elemType := t.Elem() - if elemType == typeRawDocElem { - d.dropElem(0x04) - return reflect.Zero(t).Interface() - } - - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - for d.i < end && d.in[d.i] != '\x00' { - d.i++ - } - if d.i >= end { - corrupted() - } - d.i++ - e := reflect.New(elemType).Elem() - if d.readElemTo(e, kind) { - tmp = append(tmp, e) - } - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } - - n := len(tmp) - slice := reflect.MakeSlice(t, n, n) - for i := 0; i != n; 
i++ { - slice.Index(i).Set(tmp[i]) - } - return slice.Interface() -} - -var typeSlice = reflect.TypeOf([]interface{}{}) -var typeIface = typeSlice.Elem() - -func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { - docType := d.docType - d.docType = typ - slice := make([]DocElem, 0, 8) - d.readDocWith(func(kind byte, name string) { - e := DocElem{Name: name} - v := reflect.ValueOf(&e.Value) - if d.readElemTo(v.Elem(), kind) { - slice = append(slice, e) - } - }) - slicev := reflect.New(typ).Elem() - slicev.Set(reflect.ValueOf(slice)) - d.docType = docType - return slicev -} - -func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { - docType := d.docType - d.docType = typ - slice := make([]RawDocElem, 0, 8) - d.readDocWith(func(kind byte, name string) { - e := RawDocElem{Name: name} - v := reflect.ValueOf(&e.Value) - if d.readElemTo(v.Elem(), kind) { - slice = append(slice, e) - } - }) - slicev := reflect.New(typ).Elem() - slicev.Set(reflect.ValueOf(slice)) - d.docType = docType - return slicev -} - -func (d *decoder) readDocWith(f func(kind byte, name string)) { - end := int(d.readInt32()) - end += d.i - 4 - if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { - corrupted() - } - for d.in[d.i] != '\x00' { - kind := d.readByte() - name := d.readCStr() - if d.i >= end { - corrupted() - } - f(kind, name) - if d.i >= end { - corrupted() - } - } - d.i++ // '\x00' - if d.i != end { - corrupted() - } -} - -// -------------------------------------------------------------------------- -// Unmarshaling of individual elements within a document. - -var blackHole = settableValueOf(struct{}{}) - -func (d *decoder) dropElem(kind byte) { - d.readElemTo(blackHole, kind) -} - -// Attempt to decode an element from the document and put it into out. -// If the types are not compatible, the returned ok value will be -// false and out will be unchanged. -func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { - - start := d.i - - if kind == 0x03 { - // Delegate unmarshaling of documents. - outt := out.Type() - outk := out.Kind() - switch outk { - case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: - d.readDocTo(out) - return true - } - if setterStyle(outt) != setterNone { - d.readDocTo(out) - return true - } - if outk == reflect.Slice { - switch outt.Elem() { - case typeDocElem: - out.Set(d.readDocElems(outt)) - case typeRawDocElem: - out.Set(d.readRawDocElems(outt)) - default: - d.readDocTo(blackHole) - } - return true - } - d.readDocTo(blackHole) - return true - } - - var in interface{} - - switch kind { - case 0x01: // Float64 - in = d.readFloat64() - case 0x02: // UTF-8 string - in = d.readStr() - case 0x03: // Document - panic("Can't happen. Handled above.") - case 0x04: // Array - outt := out.Type() - if setterStyle(outt) != setterNone { - // Skip the value so its data is handed to the setter below. - d.dropElem(kind) - break - } - for outt.Kind() == reflect.Ptr { - outt = outt.Elem() - } - switch outt.Kind() { - case reflect.Array: - d.readArrayDocTo(out) - return true - case reflect.Slice: - in = d.readSliceDoc(outt) - default: - in = d.readSliceDoc(typeSlice) - } - case 0x05: // Binary - b := d.readBinary() - if b.Kind == 0x00 || b.Kind == 0x02 { - in = b.Data - } else { - in = b - } - case 0x06: // Undefined (obsolete, but still seen in the wild) - in = Undefined - case 0x07: // ObjectId - in = ObjectId(d.readBytes(12)) - case 0x08: // Bool - in = d.readBool() - case 0x09: // Timestamp - // MongoDB handles timestamps as milliseconds. 
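-		// Note that 0x09 is the BSON UTC datetime element (the Mongo-specific
-		// timestamp is 0x11 below), and -62135596800000 is time.Time{} in
-		// milliseconds, so the zero time round-trips.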
- i := d.readInt64() - if i == -62135596800000 { - in = time.Time{} // In UTC for convenience. - } else { - in = time.Unix(i/1e3, i%1e3*1e6) - } - case 0x0A: // Nil - in = nil - case 0x0B: // RegEx - in = d.readRegEx() - case 0x0C: - in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} - case 0x0D: // JavaScript without scope - in = JavaScript{Code: d.readStr()} - case 0x0E: // Symbol - in = Symbol(d.readStr()) - case 0x0F: // JavaScript with scope - d.i += 4 // Skip length - js := JavaScript{d.readStr(), make(M)} - d.readDocTo(reflect.ValueOf(js.Scope)) - in = js - case 0x10: // Int32 - in = int(d.readInt32()) - case 0x11: // Mongo-specific timestamp - in = MongoTimestamp(d.readInt64()) - case 0x12: // Int64 - in = d.readInt64() - case 0x13: // Decimal128 - in = Decimal128{ - l: uint64(d.readInt64()), - h: uint64(d.readInt64()), - } - case 0x7F: // Max key - in = MaxKey - case 0xFF: // Min key - in = MinKey - default: - panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) - } - - outt := out.Type() - - if outt == typeRaw { - out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]})) - return true - } - - if setter := getSetter(outt, out); setter != nil { - err := setter.SetBSON(Raw{kind, d.in[start:d.i]}) - if err == SetZero { - out.Set(reflect.Zero(outt)) - return true - } - if err == nil { - return true - } - if _, ok := err.(*TypeError); !ok { - panic(err) - } - return false - } - - if in == nil { - out.Set(reflect.Zero(outt)) - return true - } - - outk := outt.Kind() - - // Dereference and initialize pointer if necessary. - first := true - for outk == reflect.Ptr { - if !out.IsNil() { - out = out.Elem() - } else { - elem := reflect.New(outt.Elem()) - if first { - // Only set if value is compatible. - first = false - defer func(out, elem reflect.Value) { - if good { - out.Set(elem) - } - }(out, elem) - } else { - out.Set(elem) - } - out = elem - } - outt = out.Type() - outk = outt.Kind() - } - - inv := reflect.ValueOf(in) - if outt == inv.Type() { - out.Set(inv) - return true - } - - switch outk { - case reflect.Interface: - out.Set(inv) - return true - case reflect.String: - switch inv.Kind() { - case reflect.String: - out.SetString(inv.String()) - return true - case reflect.Slice: - if b, ok := in.([]byte); ok { - out.SetString(string(b)) - return true - } - case reflect.Int, reflect.Int64: - if outt == typeJSONNumber { - out.SetString(strconv.FormatInt(inv.Int(), 10)) - return true - } - case reflect.Float64: - if outt == typeJSONNumber { - out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) - return true - } - } - case reflect.Slice, reflect.Array: - // Remember, array (0x04) slices are built with the correct - // element type. If we are here, must be a cross BSON kind - // conversion (e.g. 0x05 unmarshalling on string). 
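-		// Only byte slices and arrays are eligible targets here; any other
-		// element type falls through and is reported as incompatible.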
- if outt.Elem().Kind() != reflect.Uint8 { - break - } - switch inv.Kind() { - case reflect.String: - slice := []byte(inv.String()) - out.Set(reflect.ValueOf(slice)) - return true - case reflect.Slice: - switch outt.Kind() { - case reflect.Array: - reflect.Copy(out, inv) - case reflect.Slice: - out.SetBytes(inv.Bytes()) - } - return true - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch inv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetInt(inv.Int()) - return true - case reflect.Float32, reflect.Float64: - out.SetInt(int64(inv.Float())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetInt(1) - } else { - out.SetInt(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("can't happen: no uint types in BSON (!?)") - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch inv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetUint(uint64(inv.Int())) - return true - case reflect.Float32, reflect.Float64: - out.SetUint(uint64(inv.Float())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetUint(1) - } else { - out.SetUint(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON.") - } - case reflect.Float32, reflect.Float64: - switch inv.Kind() { - case reflect.Float32, reflect.Float64: - out.SetFloat(inv.Float()) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetFloat(float64(inv.Int())) - return true - case reflect.Bool: - if inv.Bool() { - out.SetFloat(1) - } else { - out.SetFloat(0) - } - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON?") - } - case reflect.Bool: - switch inv.Kind() { - case reflect.Bool: - out.SetBool(inv.Bool()) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - out.SetBool(inv.Int() != 0) - return true - case reflect.Float32, reflect.Float64: - out.SetBool(inv.Float() != 0) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON?") - } - case reflect.Struct: - if outt == typeURL && inv.Kind() == reflect.String { - u, err := url.Parse(inv.String()) - if err != nil { - panic(err) - } - out.Set(reflect.ValueOf(u).Elem()) - return true - } - if outt == typeBinary { - if b, ok := in.([]byte); ok { - out.Set(reflect.ValueOf(Binary{Data: b})) - return true - } - } - } - - return false -} - -// -------------------------------------------------------------------------- -// Parsers of basic types. - -func (d *decoder) readRegEx() RegEx { - re := RegEx{} - re.Pattern = d.readCStr() - re.Options = d.readCStr() - return re -} - -func (d *decoder) readBinary() Binary { - l := d.readInt32() - b := Binary{} - b.Kind = d.readByte() - b.Data = d.readBytes(l) - if b.Kind == 0x02 && len(b.Data) >= 4 { - // Weird obsolete format with redundant length. 
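-		// Subtype 0x02 prefixes the payload with a second int32 length;
-		// skip those four bytes to expose the actual data.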
- b.Data = b.Data[4:] - } - return b -} - -func (d *decoder) readStr() string { - l := d.readInt32() - b := d.readBytes(l - 1) - if d.readByte() != '\x00' { - corrupted() - } - return string(b) -} - -func (d *decoder) readCStr() string { - start := d.i - end := start - l := len(d.in) - for ; end != l; end++ { - if d.in[end] == '\x00' { - break - } - } - d.i = end + 1 - if d.i > l { - corrupted() - } - return string(d.in[start:end]) -} - -func (d *decoder) readBool() bool { - b := d.readByte() - if b == 0 { - return false - } - if b == 1 { - return true - } - panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) -} - -func (d *decoder) readFloat64() float64 { - return math.Float64frombits(uint64(d.readInt64())) -} - -func (d *decoder) readInt32() int32 { - b := d.readBytes(4) - return int32((uint32(b[0]) << 0) | - (uint32(b[1]) << 8) | - (uint32(b[2]) << 16) | - (uint32(b[3]) << 24)) -} - -func (d *decoder) readInt64() int64 { - b := d.readBytes(8) - return int64((uint64(b[0]) << 0) | - (uint64(b[1]) << 8) | - (uint64(b[2]) << 16) | - (uint64(b[3]) << 24) | - (uint64(b[4]) << 32) | - (uint64(b[5]) << 40) | - (uint64(b[6]) << 48) | - (uint64(b[7]) << 56)) -} - -func (d *decoder) readByte() byte { - i := d.i - d.i++ - if d.i > len(d.in) { - corrupted() - } - return d.in[i] -} - -func (d *decoder) readBytes(length int32) []byte { - if length < 0 { - corrupted() - } - start := d.i - d.i += int(length) - if d.i < start || d.i > len(d.in) { - corrupted() - } - return d.in[start : start+int(length)] -} diff --git a/vendor/gopkg.in/mgo.v2/bson/encode.go b/vendor/gopkg.in/mgo.v2/bson/encode.go deleted file mode 100644 index add39e8..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/encode.go +++ /dev/null @@ -1,514 +0,0 @@ -// BSON library for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// gobson - BSON library for Go. - -package bson - -import ( - "encoding/json" - "fmt" - "math" - "net/url" - "reflect" - "strconv" - "time" -) - -// -------------------------------------------------------------------------- -// Some internal infrastructure. 
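-// The reflect.Type values below are compared against value types while
-// encoding, letting the encoder pick BSON element kinds without a type
-// switch at every call site.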
- -var ( - typeBinary = reflect.TypeOf(Binary{}) - typeObjectId = reflect.TypeOf(ObjectId("")) - typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")}) - typeSymbol = reflect.TypeOf(Symbol("")) - typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) - typeOrderKey = reflect.TypeOf(MinKey) - typeDocElem = reflect.TypeOf(DocElem{}) - typeRawDocElem = reflect.TypeOf(RawDocElem{}) - typeRaw = reflect.TypeOf(Raw{}) - typeURL = reflect.TypeOf(url.URL{}) - typeTime = reflect.TypeOf(time.Time{}) - typeString = reflect.TypeOf("") - typeJSONNumber = reflect.TypeOf(json.Number("")) -) - -const itoaCacheSize = 32 - -var itoaCache []string - -func init() { - itoaCache = make([]string, itoaCacheSize) - for i := 0; i != itoaCacheSize; i++ { - itoaCache[i] = strconv.Itoa(i) - } -} - -func itoa(i int) string { - if i < itoaCacheSize { - return itoaCache[i] - } - return strconv.Itoa(i) -} - -// -------------------------------------------------------------------------- -// Marshaling of the document value itself. - -type encoder struct { - out []byte -} - -func (e *encoder) addDoc(v reflect.Value) { - for { - if vi, ok := v.Interface().(Getter); ok { - getv, err := vi.GetBSON() - if err != nil { - panic(err) - } - v = reflect.ValueOf(getv) - continue - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - continue - } - break - } - - if v.Type() == typeRaw { - raw := v.Interface().(Raw) - if raw.Kind != 0x03 && raw.Kind != 0x00 { - panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") - } - if len(raw.Data) == 0 { - panic("Attempted to marshal empty Raw document") - } - e.addBytes(raw.Data...) - return - } - - start := e.reserveInt32() - - switch v.Kind() { - case reflect.Map: - e.addMap(v) - case reflect.Struct: - e.addStruct(v) - case reflect.Array, reflect.Slice: - e.addSlice(v) - default: - panic("Can't marshal " + v.Type().String() + " as a BSON document") - } - - e.addBytes(0) - e.setInt32(start, int32(len(e.out)-start)) -} - -func (e *encoder) addMap(v reflect.Value) { - for _, k := range v.MapKeys() { - e.addElem(k.String(), v.MapIndex(k), false) - } -} - -func (e *encoder) addStruct(v reflect.Value) { - sinfo, err := getStructInfo(v.Type()) - if err != nil { - panic(err) - } - var value reflect.Value - if sinfo.InlineMap >= 0 { - m := v.Field(sinfo.InlineMap) - if m.Len() > 0 { - for _, k := range m.MapKeys() { - ks := k.String() - if _, found := sinfo.FieldsMap[ks]; found { - panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks)) - } - e.addElem(ks, m.MapIndex(k), false) - } - } - } - for _, info := range sinfo.FieldsList { - if info.Inline == nil { - value = v.Field(info.Num) - } else { - value = v.FieldByIndex(info.Inline) - } - if info.OmitEmpty && isZero(value) { - continue - } - e.addElem(info.Key, value, info.MinSize) - } -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.String: - return len(v.String()) == 0 - case reflect.Ptr, reflect.Interface: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - if vt == typeTime { - return v.Interface().(time.Time).IsZero() - } - for i := 0; i 
< v.NumField(); i++ { - if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} - -func (e *encoder) addSlice(v reflect.Value) { - vi := v.Interface() - if d, ok := vi.(D); ok { - for _, elem := range d { - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - if d, ok := vi.(RawD); ok { - for _, elem := range d { - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - l := v.Len() - et := v.Type().Elem() - if et == typeDocElem { - for i := 0; i < l; i++ { - elem := v.Index(i).Interface().(DocElem) - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - if et == typeRawDocElem { - for i := 0; i < l; i++ { - elem := v.Index(i).Interface().(RawDocElem) - e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) - } - return - } - for i := 0; i < l; i++ { - e.addElem(itoa(i), v.Index(i), false) - } -} - -// -------------------------------------------------------------------------- -// Marshaling of elements in a document. - -func (e *encoder) addElemName(kind byte, name string) { - e.addBytes(kind) - e.addBytes([]byte(name)...) - e.addBytes(0) -} - -func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { - - if !v.IsValid() { - e.addElemName(0x0A, name) - return - } - - if getter, ok := v.Interface().(Getter); ok { - getv, err := getter.GetBSON() - if err != nil { - panic(err) - } - e.addElem(name, reflect.ValueOf(getv), minSize) - return - } - - switch v.Kind() { - - case reflect.Interface: - e.addElem(name, v.Elem(), minSize) - - case reflect.Ptr: - e.addElem(name, v.Elem(), minSize) - - case reflect.String: - s := v.String() - switch v.Type() { - case typeObjectId: - if len(s) != 12 { - panic("ObjectIDs must be exactly 12 bytes long (got " + - strconv.Itoa(len(s)) + ")") - } - e.addElemName(0x07, name) - e.addBytes([]byte(s)...) - case typeSymbol: - e.addElemName(0x0E, name) - e.addStr(s) - case typeJSONNumber: - n := v.Interface().(json.Number) - if i, err := n.Int64(); err == nil { - e.addElemName(0x12, name) - e.addInt64(i) - } else if f, err := n.Float64(); err == nil { - e.addElemName(0x01, name) - e.addFloat64(f) - } else { - panic("failed to convert json.Number to a number: " + s) - } - default: - e.addElemName(0x02, name) - e.addStr(s) - } - - case reflect.Float32, reflect.Float64: - e.addElemName(0x01, name) - e.addFloat64(v.Float()) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - u := v.Uint() - if int64(u) < 0 { - panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") - } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { - e.addElemName(0x10, name) - e.addInt32(int32(u)) - } else { - e.addElemName(0x12, name) - e.addInt64(int64(u)) - } - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - switch v.Type() { - case typeMongoTimestamp: - e.addElemName(0x11, name) - e.addInt64(v.Int()) - - case typeOrderKey: - if v.Int() == int64(MaxKey) { - e.addElemName(0x7F, name) - } else { - e.addElemName(0xFF, name) - } - - default: - i := v.Int() - if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { - // It fits into an int32, encode as such. 
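-			// Only non-int64 kinds downsize automatically; an int64 field
-			// keeps its 64 bits unless explicitly tagged with minsize.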
- e.addElemName(0x10, name) - e.addInt32(int32(i)) - } else { - e.addElemName(0x12, name) - e.addInt64(i) - } - } - - case reflect.Bool: - e.addElemName(0x08, name) - if v.Bool() { - e.addBytes(1) - } else { - e.addBytes(0) - } - - case reflect.Map: - e.addElemName(0x03, name) - e.addDoc(v) - - case reflect.Slice: - vt := v.Type() - et := vt.Elem() - if et.Kind() == reflect.Uint8 { - e.addElemName(0x05, name) - e.addBinary(0x00, v.Bytes()) - } else if et == typeDocElem || et == typeRawDocElem { - e.addElemName(0x03, name) - e.addDoc(v) - } else { - e.addElemName(0x04, name) - e.addDoc(v) - } - - case reflect.Array: - et := v.Type().Elem() - if et.Kind() == reflect.Uint8 { - e.addElemName(0x05, name) - if v.CanAddr() { - e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) - } else { - n := v.Len() - e.addInt32(int32(n)) - e.addBytes(0x00) - for i := 0; i < n; i++ { - el := v.Index(i) - e.addBytes(byte(el.Uint())) - } - } - } else { - e.addElemName(0x04, name) - e.addDoc(v) - } - - case reflect.Struct: - switch s := v.Interface().(type) { - - case Raw: - kind := s.Kind - if kind == 0x00 { - kind = 0x03 - } - if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { - panic("Attempted to marshal empty Raw document") - } - e.addElemName(kind, name) - e.addBytes(s.Data...) - - case Binary: - e.addElemName(0x05, name) - e.addBinary(s.Kind, s.Data) - - case Decimal128: - e.addElemName(0x13, name) - e.addInt64(int64(s.l)) - e.addInt64(int64(s.h)) - - case DBPointer: - e.addElemName(0x0C, name) - e.addStr(s.Namespace) - if len(s.Id) != 12 { - panic("ObjectIDs must be exactly 12 bytes long (got " + - strconv.Itoa(len(s.Id)) + ")") - } - e.addBytes([]byte(s.Id)...) - - case RegEx: - e.addElemName(0x0B, name) - e.addCStr(s.Pattern) - e.addCStr(s.Options) - - case JavaScript: - if s.Scope == nil { - e.addElemName(0x0D, name) - e.addStr(s.Code) - } else { - e.addElemName(0x0F, name) - start := e.reserveInt32() - e.addStr(s.Code) - e.addDoc(reflect.ValueOf(s.Scope)) - e.setInt32(start, int32(len(e.out)-start)) - } - - case time.Time: - // MongoDB handles timestamps as milliseconds. - e.addElemName(0x09, name) - e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) - - case url.URL: - e.addElemName(0x02, name) - e.addStr(s.String()) - - case undefined: - e.addElemName(0x06, name) - - default: - e.addElemName(0x03, name) - e.addDoc(v) - } - - default: - panic("Can't marshal " + v.Type().String() + " in a BSON document") - } -} - -// -------------------------------------------------------------------------- -// Marshaling of base types. - -func (e *encoder) addBinary(subtype byte, v []byte) { - if subtype == 0x02 { - // Wonder how that brilliant idea came to life. Obsolete, luckily. - e.addInt32(int32(len(v) + 4)) - e.addBytes(subtype) - e.addInt32(int32(len(v))) - } else { - e.addInt32(int32(len(v))) - e.addBytes(subtype) - } - e.addBytes(v...) -} - -func (e *encoder) addStr(v string) { - e.addInt32(int32(len(v) + 1)) - e.addCStr(v) -} - -func (e *encoder) addCStr(v string) { - e.addBytes([]byte(v)...) 
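-	// Trailing NUL terminator required by the BSON cstring encoding.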
- e.addBytes(0) -} - -func (e *encoder) reserveInt32() (pos int) { - pos = len(e.out) - e.addBytes(0, 0, 0, 0) - return pos -} - -func (e *encoder) setInt32(pos int, v int32) { - e.out[pos+0] = byte(v) - e.out[pos+1] = byte(v >> 8) - e.out[pos+2] = byte(v >> 16) - e.out[pos+3] = byte(v >> 24) -} - -func (e *encoder) addInt32(v int32) { - u := uint32(v) - e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) -} - -func (e *encoder) addInt64(v int64) { - u := uint64(v) - e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24), - byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) -} - -func (e *encoder) addFloat64(v float64) { - e.addInt64(int64(math.Float64bits(v))) -} - -func (e *encoder) addBytes(v ...byte) { - e.out = append(e.out, v...) -} diff --git a/vendor/gopkg.in/mgo.v2/bson/json.go b/vendor/gopkg.in/mgo.v2/bson/json.go deleted file mode 100644 index d37f835..0000000 --- a/vendor/gopkg.in/mgo.v2/bson/json.go +++ /dev/null @@ -1,380 +0,0 @@ -package bson - -import ( - "bytes" - "encoding/base64" - "fmt" - "gopkg.in/mgo.v2/internal/json" - "strconv" - "time" -) - -// UnmarshalJSON unmarshals a JSON value that may hold non-standard -// syntax as defined in BSON's extended JSON specification. -func UnmarshalJSON(data []byte, value interface{}) error { - d := json.NewDecoder(bytes.NewBuffer(data)) - d.Extend(&jsonExt) - return d.Decode(value) -} - -// MarshalJSON marshals a JSON value that may hold non-standard -// syntax as defined in BSON's extended JSON specification. -func MarshalJSON(value interface{}) ([]byte, error) { - var buf bytes.Buffer - e := json.NewEncoder(&buf) - e.Extend(&jsonExt) - err := e.Encode(value) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// jdec is used internally by the JSON decoding functions -// so they may unmarshal functions without getting into endless -// recursion due to keyed objects. 
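-// Only funcExt is attached here, so decoding a function form's arguments
-// cannot recurse back into the keyed decoders; both {"$oid":...} and
-// ObjectId(...) forms therefore end up in the handlers registered in init.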
-func jdec(data []byte, value interface{}) error { - d := json.NewDecoder(bytes.NewBuffer(data)) - d.Extend(&funcExt) - return d.Decode(value) -} - -var jsonExt json.Extension -var funcExt json.Extension - -// TODO -// - Shell regular expressions ("/regexp/opts") - -func init() { - jsonExt.DecodeUnquotedKeys(true) - jsonExt.DecodeTrailingCommas(true) - - funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary") - jsonExt.DecodeKeyed("$binary", jdecBinary) - jsonExt.DecodeKeyed("$binaryFunc", jdecBinary) - jsonExt.EncodeType([]byte(nil), jencBinarySlice) - jsonExt.EncodeType(Binary{}, jencBinaryType) - - funcExt.DecodeFunc("ISODate", "$dateFunc", "S") - funcExt.DecodeFunc("new Date", "$dateFunc", "S") - jsonExt.DecodeKeyed("$date", jdecDate) - jsonExt.DecodeKeyed("$dateFunc", jdecDate) - jsonExt.EncodeType(time.Time{}, jencDate) - - funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i") - jsonExt.DecodeKeyed("$timestamp", jdecTimestamp) - jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp) - - funcExt.DecodeConst("undefined", Undefined) - - jsonExt.DecodeKeyed("$regex", jdecRegEx) - jsonExt.EncodeType(RegEx{}, jencRegEx) - - funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id") - jsonExt.DecodeKeyed("$oid", jdecObjectId) - jsonExt.DecodeKeyed("$oidFunc", jdecObjectId) - jsonExt.EncodeType(ObjectId(""), jencObjectId) - - funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id") - jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef) - - funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N") - jsonExt.DecodeKeyed("$numberLong", jdecNumberLong) - jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong) - jsonExt.EncodeType(int64(0), jencNumberLong) - jsonExt.EncodeType(int(0), jencInt) - - funcExt.DecodeConst("MinKey", MinKey) - funcExt.DecodeConst("MaxKey", MaxKey) - jsonExt.DecodeKeyed("$minKey", jdecMinKey) - jsonExt.DecodeKeyed("$maxKey", jdecMaxKey) - jsonExt.EncodeType(orderKey(0), jencMinMaxKey) - - jsonExt.DecodeKeyed("$undefined", jdecUndefined) - jsonExt.EncodeType(Undefined, jencUndefined) - - jsonExt.Extend(&funcExt) -} - -func fbytes(format string, args ...interface{}) []byte { - var buf bytes.Buffer - fmt.Fprintf(&buf, format, args...) 
- return buf.Bytes() -} - -func jdecBinary(data []byte) (interface{}, error) { - var v struct { - Binary []byte `json:"$binary"` - Type string `json:"$type"` - Func struct { - Binary []byte `json:"$binary"` - Type int64 `json:"$type"` - } `json:"$binaryFunc"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - - var binData []byte - var binKind int64 - if v.Type == "" && v.Binary == nil { - binData = v.Func.Binary - binKind = v.Func.Type - } else if v.Type == "" { - return v.Binary, nil - } else { - binData = v.Binary - binKind, err = strconv.ParseInt(v.Type, 0, 64) - if err != nil { - binKind = -1 - } - } - - if binKind == 0 { - return binData, nil - } - if binKind < 0 || binKind > 255 { - return nil, fmt.Errorf("invalid type in binary object: %s", data) - } - - return Binary{Kind: byte(binKind), Data: binData}, nil -} - -func jencBinarySlice(v interface{}) ([]byte, error) { - in := v.([]byte) - out := make([]byte, base64.StdEncoding.EncodedLen(len(in))) - base64.StdEncoding.Encode(out, in) - return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil -} - -func jencBinaryType(v interface{}) ([]byte, error) { - in := v.(Binary) - out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data))) - base64.StdEncoding.Encode(out, in.Data) - return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil -} - -const jdateFormat = "2006-01-02T15:04:05.999Z" - -func jdecDate(data []byte) (interface{}, error) { - var v struct { - S string `json:"$date"` - Func struct { - S string - } `json:"$dateFunc"` - } - _ = jdec(data, &v) - if v.S == "" { - v.S = v.Func.S - } - if v.S != "" { - for _, format := range []string{jdateFormat, "2006-01-02"} { - t, err := time.Parse(format, v.S) - if err == nil { - return t, nil - } - } - return nil, fmt.Errorf("cannot parse date: %q", v.S) - } - - var vn struct { - Date struct { - N int64 `json:"$numberLong,string"` - } `json:"$date"` - Func struct { - S int64 - } `json:"$dateFunc"` - } - err := jdec(data, &vn) - if err != nil { - return nil, fmt.Errorf("cannot parse date: %q", data) - } - n := vn.Date.N - if n == 0 { - n = vn.Func.S - } - return time.Unix(n/1000, n%1000*1e6).UTC(), nil -} - -func jencDate(v interface{}) ([]byte, error) { - t := v.(time.Time) - return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil -} - -func jdecTimestamp(data []byte) (interface{}, error) { - var v struct { - Func struct { - T int32 `json:"t"` - I int32 `json:"i"` - } `json:"$timestamp"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil -} - -func jencTimestamp(v interface{}) ([]byte, error) { - ts := uint64(v.(MongoTimestamp)) - return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil -} - -func jdecRegEx(data []byte) (interface{}, error) { - var v struct { - Regex string `json:"$regex"` - Options string `json:"$options"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - return RegEx{v.Regex, v.Options}, nil -} - -func jencRegEx(v interface{}) ([]byte, error) { - re := v.(RegEx) - type regex struct { - Regex string `json:"$regex"` - Options string `json:"$options"` - } - return json.Marshal(regex{re.Pattern, re.Options}) -} - -func jdecObjectId(data []byte) (interface{}, error) { - var v struct { - Id string `json:"$oid"` - Func struct { - Id string - } `json:"$oidFunc"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if v.Id == "" { - v.Id = v.Func.Id - } - return ObjectIdHex(v.Id), nil -} - -func 
jencObjectId(v interface{}) ([]byte, error) { - return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil -} - -func jdecDBRef(data []byte) (interface{}, error) { - // TODO Support unmarshaling $ref and $id into the input value. - var v struct { - Obj map[string]interface{} `json:"$dbrefFunc"` - } - // TODO Fix this. Must not be required. - v.Obj = make(map[string]interface{}) - err := jdec(data, &v) - if err != nil { - return nil, err - } - return v.Obj, nil -} - -func jdecNumberLong(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$numberLong,string"` - Func struct { - N int64 `json:",string"` - } `json:"$numberLongFunc"` - } - var vn struct { - N int64 `json:"$numberLong"` - Func struct { - N int64 - } `json:"$numberLongFunc"` - } - err := jdec(data, &v) - if err != nil { - err = jdec(data, &vn) - v.N = vn.N - v.Func.N = vn.Func.N - } - if err != nil { - return nil, err - } - if v.N != 0 { - return v.N, nil - } - return v.Func.N, nil -} - -func jencNumberLong(v interface{}) ([]byte, error) { - n := v.(int64) - f := `{"$numberLong":"%d"}` - if n <= 1<<53 { - f = `{"$numberLong":%d}` - } - return fbytes(f, n), nil -} - -func jencInt(v interface{}) ([]byte, error) { - n := v.(int) - f := `{"$numberLong":"%d"}` - if n <= 1<<53 { - f = `%d` - } - return fbytes(f, n), nil -} - -func jdecMinKey(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$minKey"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if v.N != 1 { - return nil, fmt.Errorf("invalid $minKey object: %s", data) - } - return MinKey, nil -} - -func jdecMaxKey(data []byte) (interface{}, error) { - var v struct { - N int64 `json:"$maxKey"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if v.N != 1 { - return nil, fmt.Errorf("invalid $maxKey object: %s", data) - } - return MaxKey, nil -} - -func jencMinMaxKey(v interface{}) ([]byte, error) { - switch v.(orderKey) { - case MinKey: - return []byte(`{"$minKey":1}`), nil - case MaxKey: - return []byte(`{"$maxKey":1}`), nil - } - panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v)) -} - -func jdecUndefined(data []byte) (interface{}, error) { - var v struct { - B bool `json:"$undefined"` - } - err := jdec(data, &v) - if err != nil { - return nil, err - } - if !v.B { - return nil, fmt.Errorf("invalid $undefined object: %s", data) - } - return Undefined, nil -} - -func jencUndefined(v interface{}) ([]byte, error) { - return []byte(`{"$undefined":true}`), nil -} diff --git a/vendor/gopkg.in/mgo.v2/bulk.go b/vendor/gopkg.in/mgo.v2/bulk.go deleted file mode 100644 index 072a520..0000000 --- a/vendor/gopkg.in/mgo.v2/bulk.go +++ /dev/null @@ -1,351 +0,0 @@ -package mgo - -import ( - "bytes" - "sort" - - "gopkg.in/mgo.v2/bson" -) - -// Bulk represents an operation that can be prepared with several -// orthogonal changes before being delivered to the server. -// -// MongoDB servers older than version 2.6 do not have proper support for bulk -// operations, so the driver attempts to map its API as much as possible into -// the functionality that works. In particular, in those releases updates and -// removals are sent individually, and inserts are sent in bulk but have -// suboptimal error reporting compared to more recent versions of the server. -// See the documentation of BulkErrorCase for details on that. 
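-//
-// A typical use, sketched (collection and the docs are placeholders):
-//
-//     bulk := collection.Bulk()
-//     bulk.Insert(doc1, doc2)
-//     bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 2}})
-//     if _, err := bulk.Run(); err != nil {
-//             // err.(*BulkError).Cases() lists per-operation failures.
-//     }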
-//
-// Relevant documentation:
-//
-//     http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
-//
-type Bulk struct {
-	c       *Collection
-	opcount int
-	actions []bulkAction
-	ordered bool
-}
-
-type bulkOp int
-
-const (
-	bulkInsert bulkOp = iota + 1
-	bulkUpdate
-	bulkUpdateAll
-	bulkRemove
-)
-
-type bulkAction struct {
-	op   bulkOp
-	docs []interface{}
-	idxs []int
-}
-
-type bulkUpdateOp []interface{}
-type bulkDeleteOp []interface{}
-
-// BulkResult holds the results for a bulk operation.
-type BulkResult struct {
-	Matched  int
-	Modified int // Available only for MongoDB 2.6+
-
-	// Be conservative while we understand exactly how to report these
-	// results in a useful and convenient way, and also how to emulate
-	// them with prior servers.
-	private bool
-}
-
-// BulkError holds an error returned from running a Bulk operation.
-// Individual errors may be obtained and inspected via the Cases method.
-type BulkError struct {
-	ecases []BulkErrorCase
-}
-
-func (e *BulkError) Error() string {
-	if len(e.ecases) == 0 {
-		return "invalid BulkError instance: no errors"
-	}
-	if len(e.ecases) == 1 {
-		return e.ecases[0].Err.Error()
-	}
-	msgs := make([]string, 0, len(e.ecases))
-	seen := make(map[string]bool)
-	for _, ecase := range e.ecases {
-		msg := ecase.Err.Error()
-		if !seen[msg] {
-			seen[msg] = true
-			msgs = append(msgs, msg)
-		}
-	}
-	if len(msgs) == 1 {
-		return msgs[0]
-	}
-	var buf bytes.Buffer
-	buf.WriteString("multiple errors in bulk operation:\n")
-	for _, msg := range msgs {
-		buf.WriteString("  - ")
-		buf.WriteString(msg)
-		buf.WriteByte('\n')
-	}
-	return buf.String()
-}
-
-type bulkErrorCases []BulkErrorCase
-
-func (slice bulkErrorCases) Len() int           { return len(slice) }
-func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
-func (slice bulkErrorCases) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }
-
-// BulkErrorCase holds an individual error found while attempting a single change
-// within a bulk operation, and the position in which it was enqueued.
-//
-// MongoDB servers older than version 2.6 do not have proper support for bulk
-// operations, so the driver attempts to map its API as much as possible into
-// the functionality that works. In particular, only the last error is reported
-// for bulk inserts and without any positional information, so the Index
-// field is set to -1 in these cases.
-type BulkErrorCase struct {
-	Index int // Position of operation that failed, or -1 if unknown.
-	Err   error
-}
-
-// Cases returns all individual errors found while attempting the requested changes.
-//
-// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
-func (e *BulkError) Cases() []BulkErrorCase {
-	return e.ecases
-}
-
-// Bulk returns a value to prepare the execution of a bulk operation.
-func (c *Collection) Bulk() *Bulk {
-	return &Bulk{c: c, ordered: true}
-}
-
-// Unordered puts the bulk operation in unordered mode.
-//
-// In unordered mode the individual operations may be sent
-// out of order, which means later operations may proceed
-// even if prior ones have failed.
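-// For inserts this is implemented with the ContinueOnError wire flag
-// (see runInsert below); unordered updates and removes are grouped into
-// shared batches instead.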
-func (b *Bulk) Unordered() { - b.ordered = false -} - -func (b *Bulk) action(op bulkOp, opcount int) *bulkAction { - var action *bulkAction - if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { - action = &b.actions[len(b.actions)-1] - } else if !b.ordered { - for i := range b.actions { - if b.actions[i].op == op { - action = &b.actions[i] - break - } - } - } - if action == nil { - b.actions = append(b.actions, bulkAction{op: op}) - action = &b.actions[len(b.actions)-1] - } - for i := 0; i < opcount; i++ { - action.idxs = append(action.idxs, b.opcount) - b.opcount++ - } - return action -} - -// Insert queues up the provided documents for insertion. -func (b *Bulk) Insert(docs ...interface{}) { - action := b.action(bulkInsert, len(docs)) - action.docs = append(action.docs, docs...) -} - -// Remove queues up the provided selectors for removing matching documents. -// Each selector will remove only a single matching document. -func (b *Bulk) Remove(selectors ...interface{}) { - action := b.action(bulkRemove, len(selectors)) - for _, selector := range selectors { - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &deleteOp{ - Collection: b.c.FullName, - Selector: selector, - Flags: 1, - Limit: 1, - }) - } -} - -// RemoveAll queues up the provided selectors for removing all matching documents. -// Each selector will remove all matching documents. -func (b *Bulk) RemoveAll(selectors ...interface{}) { - action := b.action(bulkRemove, len(selectors)) - for _, selector := range selectors { - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &deleteOp{ - Collection: b.c.FullName, - Selector: selector, - Flags: 0, - Limit: 0, - }) - } -} - -// Update queues up the provided pairs of updating instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair matches exactly one document for updating at most. -func (b *Bulk) Update(pairs ...interface{}) { - if len(pairs)%2 != 0 { - panic("Bulk.Update requires an even number of parameters") - } - action := b.action(bulkUpdate, len(pairs)/2) - for i := 0; i < len(pairs); i += 2 { - selector := pairs[i] - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &updateOp{ - Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - }) - } -} - -// UpdateAll queues up the provided pairs of updating instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair updates all documents matching the selector. -func (b *Bulk) UpdateAll(pairs ...interface{}) { - if len(pairs)%2 != 0 { - panic("Bulk.UpdateAll requires an even number of parameters") - } - action := b.action(bulkUpdate, len(pairs)/2) - for i := 0; i < len(pairs); i += 2 { - selector := pairs[i] - if selector == nil { - selector = bson.D{} - } - action.docs = append(action.docs, &updateOp{ - Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - Flags: 2, - Multi: true, - }) - } -} - -// Upsert queues up the provided pairs of upserting instructions. -// The first element of each pair selects which documents must be -// updated, and the second element defines how to update it. -// Each pair matches exactly one document for updating at most. 
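-// If a selector matches nothing, a new document is inserted instead.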
-func (b *Bulk) Upsert(pairs ...interface{}) {
-	if len(pairs)%2 != 0 {
-		panic("Bulk.Upsert requires an even number of parameters")
-	}
-	action := b.action(bulkUpdate, len(pairs)/2)
-	for i := 0; i < len(pairs); i += 2 {
-		selector := pairs[i]
-		if selector == nil {
-			selector = bson.D{}
-		}
-		action.docs = append(action.docs, &updateOp{
-			Collection: b.c.FullName,
-			Selector:   selector,
-			Update:     pairs[i+1],
-			Flags:      1,
-			Upsert:     true,
-		})
-	}
-}
-
-// Run runs all the operations queued up.
-//
-// If an error is reported on an unordered bulk operation, the error value may
-// be an aggregation of all issues observed. As an exception to that, Insert
-// operations running on MongoDB versions prior to 2.6 will report the last
-// error only due to a limitation in the wire protocol.
-func (b *Bulk) Run() (*BulkResult, error) {
-	var result BulkResult
-	var berr BulkError
-	var failed bool
-	for i := range b.actions {
-		action := &b.actions[i]
-		var ok bool
-		switch action.op {
-		case bulkInsert:
-			ok = b.runInsert(action, &result, &berr)
-		case bulkUpdate:
-			ok = b.runUpdate(action, &result, &berr)
-		case bulkRemove:
-			ok = b.runRemove(action, &result, &berr)
-		default:
-			panic("unknown bulk operation")
-		}
-		if !ok {
-			failed = true
-			if b.ordered {
-				break
-			}
-		}
-	}
-	if failed {
-		sort.Sort(bulkErrorCases(berr.ecases))
-		return nil, &berr
-	}
-	return &result, nil
-}
-
-func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
-	op := &insertOp{b.c.FullName, action.docs, 0}
-	if !b.ordered {
-		op.flags = 1 // ContinueOnError
-	}
-	lerr, err := b.c.writeOp(op, b.ordered)
-	return b.checkSuccess(action, berr, lerr, err)
-}
-
-func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
-	lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
-	if lerr != nil {
-		result.Matched += lerr.N
-		result.Modified += lerr.modified
-	}
-	return b.checkSuccess(action, berr, lerr, err)
-}
-
-func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
-	lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
-	if lerr != nil {
-		result.Matched += lerr.N
-		result.Modified += lerr.modified
-	}
-	return b.checkSuccess(action, berr, lerr, err)
-}
-
-func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
-	if lerr != nil && len(lerr.ecases) > 0 {
-		for i := 0; i < len(lerr.ecases); i++ {
-			// Map back from the local error index into the visible one.
-			ecase := lerr.ecases[i]
-			idx := ecase.Index
-			if idx >= 0 {
-				idx = action.idxs[idx]
-			}
-			berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
-		}
-		return false
-	} else if err != nil {
-		for i := 0; i < len(action.idxs); i++ {
-			berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
-		}
-		return false
-	}
-	return true
-}
diff --git a/vendor/gopkg.in/mgo.v2/cluster.go b/vendor/gopkg.in/mgo.v2/cluster.go
deleted file mode 100644
index c3bf8b0..0000000
--- a/vendor/gopkg.in/mgo.v2/cluster.go
+++ /dev/null
@@ -1,682 +0,0 @@
-// mgo - MongoDB driver for Go
-//
-// Copyright (c) 2010-2012 - Gustavo Niemeyer
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this
-// list of conditions and the following disclaimer.
-// 2.
Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo - -import ( - "errors" - "fmt" - "net" - "strconv" - "strings" - "sync" - "time" - - "gopkg.in/mgo.v2/bson" -) - -// --------------------------------------------------------------------------- -// Mongo cluster encapsulation. -// -// A cluster enables the communication with one or more servers participating -// in a mongo cluster. This works with individual servers, a replica set, -// a replica pair, one or multiple mongos routers, etc. - -type mongoCluster struct { - sync.RWMutex - serverSynced sync.Cond - userSeeds []string - dynaSeeds []string - servers mongoServers - masters mongoServers - references int - syncing bool - direct bool - failFast bool - syncCount uint - setName string - cachedIndex map[string]bool - sync chan bool - dial dialer -} - -func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster { - cluster := &mongoCluster{ - userSeeds: userSeeds, - references: 1, - direct: direct, - failFast: failFast, - dial: dial, - setName: setName, - } - cluster.serverSynced.L = cluster.RWMutex.RLocker() - cluster.sync = make(chan bool, 1) - stats.cluster(+1) - go cluster.syncServersLoop() - return cluster -} - -// Acquire increases the reference count for the cluster. -func (cluster *mongoCluster) Acquire() { - cluster.Lock() - cluster.references++ - debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references) - cluster.Unlock() -} - -// Release decreases the reference count for the cluster. Once -// it reaches zero, all servers will be closed. -func (cluster *mongoCluster) Release() { - cluster.Lock() - if cluster.references == 0 { - panic("cluster.Release() with references == 0") - } - cluster.references-- - debugf("Cluster %p released (refs=%d)", cluster, cluster.references) - if cluster.references == 0 { - for _, server := range cluster.servers.Slice() { - server.Close() - } - // Wake up the sync loop so it can die. 
- cluster.syncServers() - stats.cluster(-1) - } - cluster.Unlock() -} - -func (cluster *mongoCluster) LiveServers() (servers []string) { - cluster.RLock() - for _, serv := range cluster.servers.Slice() { - servers = append(servers, serv.Addr) - } - cluster.RUnlock() - return servers -} - -func (cluster *mongoCluster) removeServer(server *mongoServer) { - cluster.Lock() - cluster.masters.Remove(server) - other := cluster.servers.Remove(server) - cluster.Unlock() - if other != nil { - other.Close() - log("Removed server ", server.Addr, " from cluster.") - } - server.Close() -} - -type isMasterResult struct { - IsMaster bool - Secondary bool - Primary string - Hosts []string - Passives []string - Tags bson.D - Msg string - SetName string `bson:"setName"` - MaxWireVersion int `bson:"maxWireVersion"` -} - -func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { - // Monotonic lets it talk to a slave and still hold the socket. - session := newSession(Monotonic, cluster, 10*time.Second) - session.setSocket(socket) - err := session.Run("ismaster", result) - session.Close() - return err -} - -type possibleTimeout interface { - Timeout() bool -} - -var syncSocketTimeout = 5 * time.Second - -func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { - var syncTimeout time.Duration - if raceDetector { - // This variable is only ever touched by tests. - globalMutex.Lock() - syncTimeout = syncSocketTimeout - globalMutex.Unlock() - } else { - syncTimeout = syncSocketTimeout - } - - addr := server.Addr - log("SYNC Processing ", addr, "...") - - // Retry a few times to avoid knocking a server down for a hiccup. - var result isMasterResult - var tryerr error - for retry := 0; ; retry++ { - if retry == 3 || retry == 1 && cluster.failFast { - return nil, nil, tryerr - } - if retry > 0 { - // Don't abuse the server needlessly if there's something actually wrong. - if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { - // Give a chance for waiters to timeout as well. - cluster.serverSynced.Broadcast() - } - time.Sleep(syncShortDelay) - } - - // It's not clear what would be a good timeout here. Is it - // better to wait longer or to retry? - socket, _, err := server.AcquireSocket(0, syncTimeout) - if err != nil { - tryerr = err - logf("SYNC Failed to get socket to %s: %v", addr, err) - continue - } - err = cluster.isMaster(socket, &result) - socket.Release() - if err != nil { - tryerr = err - logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) - continue - } - debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) - break - } - - if cluster.setName != "" && result.SetName != cluster.setName { - logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName) - return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName) - } - - if result.IsMaster { - debugf("SYNC %s is a master.", addr) - if !server.info.Master { - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) - stats.conn(+1, true) - } - } else if result.Secondary { - debugf("SYNC %s is a slave.", addr) - } else if cluster.direct { - logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) - } else { - logf("SYNC %s is neither a master nor a slave.", addr) - // Let stats track it as whatever was known before.
- return nil, nil, errors.New(addr + " is not a master nor slave") - } - - info = &mongoServerInfo{ - Master: result.IsMaster, - Mongos: result.Msg == "isdbgrid", - Tags: result.Tags, - SetName: result.SetName, - MaxWireVersion: result.MaxWireVersion, - } - - hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) - if result.Primary != "" { - // First in the list to speed up master discovery. - hosts = append(hosts, result.Primary) - } - hosts = append(hosts, result.Hosts...) - hosts = append(hosts, result.Passives...) - - debugf("SYNC %s knows about the following peers: %#v", addr, hosts) - return info, hosts, nil -} - -type syncKind bool - -const ( - completeSync syncKind = true - partialSync syncKind = false -) - -func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { - cluster.Lock() - current := cluster.servers.Search(server.ResolvedAddr) - if current == nil { - if syncKind == partialSync { - cluster.Unlock() - server.Close() - log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") - return - } - cluster.servers.Add(server) - if info.Master { - cluster.masters.Add(server) - log("SYNC Adding ", server.Addr, " to cluster as a master.") - } else { - log("SYNC Adding ", server.Addr, " to cluster as a slave.") - } - } else { - if server != current { - panic("addServer attempting to add duplicated server") - } - if server.Info().Master != info.Master { - if info.Master { - log("SYNC Server ", server.Addr, " is now a master.") - cluster.masters.Add(server) - } else { - log("SYNC Server ", server.Addr, " is now a slave.") - cluster.masters.Remove(server) - } - } - } - server.SetInfo(info) - debugf("SYNC Broadcasting availability of server %s", server.Addr) - cluster.serverSynced.Broadcast() - cluster.Unlock() -} - -func (cluster *mongoCluster) getKnownAddrs() []string { - cluster.RLock() - max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() - seen := make(map[string]bool, max) - known := make([]string, 0, max) - - add := func(addr string) { - if _, found := seen[addr]; !found { - seen[addr] = true - known = append(known, addr) - } - } - - for _, addr := range cluster.userSeeds { - add(addr) - } - for _, addr := range cluster.dynaSeeds { - add(addr) - } - for _, serv := range cluster.servers.Slice() { - add(serv.Addr) - } - cluster.RUnlock() - - return known -} - -// syncServers injects a value into the cluster.sync channel to force -// an iteration of the syncServersLoop function. -func (cluster *mongoCluster) syncServers() { - select { - case cluster.sync <- true: - default: - } -} - -// How long to wait for a checkup of the cluster topology if nothing -// else kicks a synchronization before that. -const syncServersDelay = 30 * time.Second -const syncShortDelay = 500 * time.Millisecond - -// syncServersLoop loops while the cluster is alive to keep its idea of -// the server topology up-to-date. It must be called just once from -// newCluster. The loop iterates once syncServersDelay has passed, or -// if somebody injects a value into the cluster.sync channel to force a -// synchronization. A loop iteration will contact all servers in -// parallel, ask them about known peers and their own role within the -// cluster, and then attempt to do the same with all the peers -// retrieved. 
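The syncServers helper above leans on a common Go idiom: a buffered channel of capacity one plus a non-blocking send, so any number of goroutines can request a sync without blocking, and requests made while one is already pending coalesce into a single wakeup. A standalone sketch of that idiom, with invented names:

    package main

    import (
        "fmt"
        "time"
    )

    // kick requests one iteration of the worker loop without blocking;
    // duplicate requests are dropped while one is already queued.
    func kick(sync chan bool) {
        select {
        case sync <- true:
        default:
        }
    }

    func main() {
        sync := make(chan bool, 1)
        go func() {
            for range sync {
                fmt.Println("sync iteration")
                time.Sleep(10 * time.Millisecond)
            }
        }()
        for i := 0; i < 100; i++ {
            kick(sync) // 100 requests coalesce into one or two iterations
        }
        time.Sleep(50 * time.Millisecond) // crude wait, for the sketch only
    }
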
-func (cluster *mongoCluster) syncServersLoop() { - for { - debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) - - cluster.Lock() - if cluster.references == 0 { - cluster.Unlock() - break - } - cluster.references++ // Keep alive while syncing. - direct := cluster.direct - cluster.Unlock() - - cluster.syncServersIteration(direct) - - // We just synchronized, so consume any outstanding requests. - select { - case <-cluster.sync: - default: - } - - cluster.Release() - - // Hold off before allowing another sync. No point in - // burning CPU looking for down servers. - if !cluster.failFast { - time.Sleep(syncShortDelay) - } - - cluster.Lock() - if cluster.references == 0 { - cluster.Unlock() - break - } - cluster.syncCount++ - // Poke all waiters so they have a chance to timeout or - // restart syncing if they wish to. - cluster.serverSynced.Broadcast() - // Check if we have to restart immediately either way. - restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() - cluster.Unlock() - - if restart { - log("SYNC No masters found. Will synchronize again.") - time.Sleep(syncShortDelay) - continue - } - - debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) - - // Hold off until somebody explicitly requests a synchronization - // or it's time to check for a cluster topology change again. - select { - case <-cluster.sync: - case <-time.After(syncServersDelay): - } - } - debugf("SYNC Cluster %p is stopping its sync loop.", cluster) -} - -func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { - cluster.RLock() - server := cluster.servers.Search(tcpaddr.String()) - cluster.RUnlock() - if server != nil { - return server - } - return newServer(addr, tcpaddr, cluster.sync, cluster.dial) -} - -func resolveAddr(addr string) (*net.TCPAddr, error) { - // Simple cases that do not need actual resolution. Works with IPv4 and v6. - if host, port, err := net.SplitHostPort(addr); err == nil { - if port, _ := strconv.Atoi(port); port > 0 { - zone := "" - if i := strings.LastIndex(host, "%"); i >= 0 { - zone = host[i+1:] - host = host[:i] - } - ip := net.ParseIP(host) - if ip != nil { - return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil - } - } - } - - // Attempt to resolve IPv4 and v6 concurrently. - addrChan := make(chan *net.TCPAddr, 2) - for _, network := range []string{"udp4", "udp6"} { - network := network - go func() { - // The unfortunate UDP dialing hack allows having a timeout on address resolution. - conn, err := net.DialTimeout(network, addr, 10*time.Second) - if err != nil { - addrChan <- nil - } else { - addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) - conn.Close() - } - }() - } - - // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. - tcpaddr := <-addrChan - if tcpaddr == nil || len(tcpaddr.IP) != 4 { - var timeout <-chan time.Time - if tcpaddr != nil { - // Don't wait too long if an IPv6 address is known. - timeout = time.After(50 * time.Millisecond) - } - select { - case <-timeout: - case tcpaddr2 := <-addrChan: - if tcpaddr == nil || tcpaddr2 != nil { - // It's an IPv4 address or the only known address. Use it. 
- tcpaddr = tcpaddr2 - } - } - } - - if tcpaddr == nil { - log("SYNC Failed to resolve server address: ", addr) - return nil, errors.New("failed to resolve server address: " + addr) - } - if tcpaddr.String() != addr { - debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) - } - return tcpaddr, nil -} - -type pendingAdd struct { - server *mongoServer - info *mongoServerInfo -} - -func (cluster *mongoCluster) syncServersIteration(direct bool) { - log("SYNC Starting full topology synchronization...") - - var wg sync.WaitGroup - var m sync.Mutex - notYetAdded := make(map[string]pendingAdd) - addIfFound := make(map[string]bool) - seen := make(map[string]bool) - syncKind := partialSync - - var spawnSync func(addr string, byMaster bool) - spawnSync = func(addr string, byMaster bool) { - wg.Add(1) - go func() { - defer wg.Done() - - tcpaddr, err := resolveAddr(addr) - if err != nil { - log("SYNC Failed to start sync of ", addr, ": ", err.Error()) - return - } - resolvedAddr := tcpaddr.String() - - m.Lock() - if byMaster { - if pending, ok := notYetAdded[resolvedAddr]; ok { - delete(notYetAdded, resolvedAddr) - m.Unlock() - cluster.addServer(pending.server, pending.info, completeSync) - return - } - addIfFound[resolvedAddr] = true - } - if seen[resolvedAddr] { - m.Unlock() - return - } - seen[resolvedAddr] = true - m.Unlock() - - server := cluster.server(addr, tcpaddr) - info, hosts, err := cluster.syncServer(server) - if err != nil { - cluster.removeServer(server) - return - } - - m.Lock() - add := direct || info.Master || addIfFound[resolvedAddr] - if add { - syncKind = completeSync - } else { - notYetAdded[resolvedAddr] = pendingAdd{server, info} - } - m.Unlock() - if add { - cluster.addServer(server, info, completeSync) - } - if !direct { - for _, addr := range hosts { - spawnSync(addr, info.Master) - } - } - }() - } - - knownAddrs := cluster.getKnownAddrs() - for _, addr := range knownAddrs { - spawnSync(addr, false) - } - wg.Wait() - - if syncKind == completeSync { - logf("SYNC Synchronization was complete (got data from primary).") - for _, pending := range notYetAdded { - cluster.removeServer(pending.server) - } - } else { - logf("SYNC Synchronization was partial (cannot talk to primary).") - for _, pending := range notYetAdded { - cluster.addServer(pending.server, pending.info, partialSync) - } - } - - cluster.Lock() - mastersLen := cluster.masters.Len() - logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) - - // Update dynamic seeds, but only if we have any good servers. Otherwise, - // leave them alone for better chances of a successful sync in the future. - if syncKind == completeSync { - dynaSeeds := make([]string, cluster.servers.Len()) - for i, server := range cluster.servers.Slice() { - dynaSeeds[i] = server.Addr - } - cluster.dynaSeeds = dynaSeeds - debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) - } - cluster.Unlock() -} - -// AcquireSocket returns a socket to a server in the cluster. If slaveOk is -// true, it will attempt to return a socket to a slave server. If it is -// false, the socket will necessarily be to a master server. 
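resolveAddr, shown above, first tries to build a TCPAddr without touching the network at all. The following self-contained sketch of that fast path (the function name is invented) only succeeds when the host part is already a literal IP; anything else would fall through to the concurrent udp4/udp6 dial trick used above:

    package main

    import (
        "fmt"
        "net"
        "strconv"
    )

    // fastResolve mirrors resolveAddr's no-DNS fast path: a host:port with
    // a literal IP resolves locally; otherwise it returns nil.
    func fastResolve(addr string) *net.TCPAddr {
        host, portStr, err := net.SplitHostPort(addr)
        if err != nil {
            return nil
        }
        port, _ := strconv.Atoi(portStr)
        ip := net.ParseIP(host)
        if ip == nil || port <= 0 {
            return nil
        }
        return &net.TCPAddr{IP: ip, Port: port}
    }

    func main() {
        fmt.Println(fastResolve("127.0.0.1:27017"))   // 127.0.0.1:27017
        fmt.Println(fastResolve("[::1]:27017"))       // handles IPv6 too
        fmt.Println(fastResolve("example.com:27017")) // <nil>: needs DNS
    }
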
-func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { - var started time.Time - var syncCount uint - warnedLimit := false - for { - cluster.RLock() - for { - mastersLen := cluster.masters.Len() - slavesLen := cluster.servers.Len() - mastersLen - debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) - if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { - break - } - if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { - break - } - if started.IsZero() { - // Initialize after fast path above. - started = time.Now() - syncCount = cluster.syncCount - } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { - cluster.RUnlock() - return nil, errors.New("no reachable servers") - } - log("Waiting for servers to synchronize...") - cluster.syncServers() - - // Remember: this will release and reacquire the lock. - cluster.serverSynced.Wait() - } - - var server *mongoServer - if slaveOk { - server = cluster.servers.BestFit(mode, serverTags) - } else { - server = cluster.masters.BestFit(mode, nil) - } - cluster.RUnlock() - - if server == nil { - // Must have failed the requested tags. Sleep to avoid spinning. - time.Sleep(1e8) - continue - } - - s, abended, err := server.AcquireSocket(poolLimit, socketTimeout) - if err == errPoolLimit { - if !warnedLimit { - warnedLimit = true - log("WARNING: Per-server connection limit reached.") - } - time.Sleep(100 * time.Millisecond) - continue - } - if err != nil { - cluster.removeServer(server) - cluster.syncServers() - continue - } - if abended && !slaveOk { - var result isMasterResult - err := cluster.isMaster(s, &result) - if err != nil || !result.IsMaster { - logf("Cannot confirm server %s as master (%v)", server.Addr, err) - s.Release() - cluster.syncServers() - time.Sleep(100 * time.Millisecond) - continue - } - } - return s, nil - } - panic("unreached") -} - -func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { - cluster.Lock() - if cluster.cachedIndex == nil { - cluster.cachedIndex = make(map[string]bool) - } - if exists { - cluster.cachedIndex[cacheKey] = true - } else { - delete(cluster.cachedIndex, cacheKey) - } - cluster.Unlock() -} - -func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { - cluster.RLock() - if cluster.cachedIndex != nil { - result = cluster.cachedIndex[cacheKey] - } - cluster.RUnlock() - return -} - -func (cluster *mongoCluster) ResetIndexCache() { - cluster.Lock() - cluster.cachedIndex = make(map[string]bool) - cluster.Unlock() -} diff --git a/vendor/gopkg.in/mgo.v2/doc.go b/vendor/gopkg.in/mgo.v2/doc.go deleted file mode 100644 index 859fd9b..0000000 --- a/vendor/gopkg.in/mgo.v2/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package mgo offers a rich MongoDB driver for Go. -// -// Details about the mgo project (pronounced as "mango") are found -// in its web page: -// -// http://labix.org/mgo -// -// Usage of the driver revolves around the concept of sessions. To -// get started, obtain a session using the Dial function: -// -// session, err := mgo.Dial(url) -// -// This will establish one or more connections with the cluster of -// servers defined by the url parameter. 
From then on, the cluster -// may be queried with multiple consistency rules (see SetMode) and -// documents retrieved with statements such as: -// -// c := session.DB(database).C(collection) -// err := c.Find(query).One(&result) -// -// New sessions are typically created by calling session.Copy on the -// initial session obtained at dial time. These new sessions will share -// the same cluster information and connection pool, and may be easily -// handed into other methods and functions for organizing logic. -// Every session created must have its Close method called at the end -// of its life time, so its resources may be put back in the pool or -// collected, depending on the case. -// -// For more details, see the documentation for the types and methods. -// -package mgo diff --git a/vendor/gopkg.in/mgo.v2/gridfs.go b/vendor/gopkg.in/mgo.v2/gridfs.go deleted file mode 100644 index 4214720..0000000 --- a/vendor/gopkg.in/mgo.v2/gridfs.go +++ /dev/null @@ -1,761 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
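Since doc.go above is the only prose overview in the package, here is a minimal sketch of the session life cycle it describes: dial once, copy per unit of work, close everything. The URL, database, and collection names are placeholders:

    package main

    import (
        "fmt"

        "gopkg.in/mgo.v2"
        "gopkg.in/mgo.v2/bson"
    )

    func main() {
        session, err := mgo.Dial("mongodb://localhost:27017") // placeholder URL
        if err != nil {
            panic(err)
        }
        defer session.Close()
        session.SetMode(mgo.Monotonic, true)

        // Copies share cluster state and the connection pool; each copy
        // still needs its own Close.
        s := session.Copy()
        defer s.Close()

        var result bson.M
        err = s.DB("test").C("items").Find(bson.M{"name": "a"}).One(&result)
        fmt.Println(result, err)
    }
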
- -package mgo - -import ( - "crypto/md5" - "encoding/hex" - "errors" - "hash" - "io" - "os" - "sync" - "time" - - "gopkg.in/mgo.v2/bson" -) - -type GridFS struct { - Files *Collection - Chunks *Collection -} - -type gfsFileMode int - -const ( - gfsClosed gfsFileMode = 0 - gfsReading gfsFileMode = 1 - gfsWriting gfsFileMode = 2 -) - -type GridFile struct { - m sync.Mutex - c sync.Cond - gfs *GridFS - mode gfsFileMode - err error - - chunk int - offset int64 - - wpending int - wbuf []byte - wsum hash.Hash - - rbuf []byte - rcache *gfsCachedChunk - - doc gfsFile -} - -type gfsFile struct { - Id interface{} "_id" - ChunkSize int "chunkSize" - UploadDate time.Time "uploadDate" - Length int64 ",minsize" - MD5 string - Filename string ",omitempty" - ContentType string "contentType,omitempty" - Metadata *bson.Raw ",omitempty" -} - -type gfsChunk struct { - Id interface{} "_id" - FilesId interface{} "files_id" - N int - Data []byte -} - -type gfsCachedChunk struct { - wait sync.Mutex - n int - data []byte - err error -} - -func newGridFS(db *Database, prefix string) *GridFS { - return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} -} - -func (gfs *GridFS) newFile() *GridFile { - file := &GridFile{gfs: gfs} - file.c.L = &file.m - //runtime.SetFinalizer(file, finalizeFile) - return file -} - -func finalizeFile(file *GridFile) { - file.Close() -} - -// Create creates a new file with the provided name in the GridFS. If the file -// name already exists, a new version will be inserted with an up-to-date -// uploadDate that will cause it to be atomically visible to the Open and -// OpenId methods. If the file name is not important, an empty name may be -// provided and the file Id used instead. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -// -// A simple example inserting a new file: -// -// func check(err error) { -// if err != nil { -// panic(err.String()) -// } -// } -// file, err := db.GridFS("fs").Create("myfile.txt") -// check(err) -// n, err := file.Write([]byte("Hello world!")) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes written\n", n) -// -// The io.Writer interface is implemented by *GridFile and may be used to -// help on the file creation. For example: -// -// file, err := db.GridFS("fs").Create("myfile.txt") -// check(err) -// messages, err := os.Open("/var/log/messages") -// check(err) -// defer messages.Close() -// err = io.Copy(file, messages) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) Create(name string) (file *GridFile, err error) { - file = gfs.newFile() - file.mode = gfsWriting - file.wsum = md5.New() - file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} - return -} - -// OpenId returns the file with the provided id, for reading. -// If the file isn't found, err will be set to mgo.ErrNotFound. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. 
-// -// The following example will print the first 8192 bytes from the file: -// -// func check(err error) { -// if err != nil { -// panic(err.String()) -// } -// } -// file, err := db.GridFS("fs").OpenId(objid) -// check(err) -// b := make([]byte, 8192) -// n, err := file.Read(b) -// check(err) -// fmt.Println(string(b)) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes read\n", n) -// -// The io.Reader interface is implemented by *GridFile and may be used to -// deal with it. As an example, the following snippet will dump the whole -// file into the standard output: -// -// file, err := db.GridFS("fs").OpenId(objid) -// check(err) -// err = io.Copy(os.Stdout, file) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) { - var doc gfsFile - err = gfs.Files.Find(bson.M{"_id": id}).One(&doc) - if err != nil { - return - } - file = gfs.newFile() - file.mode = gfsReading - file.doc = doc - return -} - -// Open returns the most recently uploaded file with the provided -// name, for reading. If the file isn't found, err will be set -// to mgo.ErrNotFound. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. -// -// The following example will print the first 8192 bytes from the file: -// -// file, err := db.GridFS("fs").Open("myfile.txt") -// check(err) -// b := make([]byte, 8192) -// n, err := file.Read(b) -// check(err) -// fmt.Println(string(b)) -// check(err) -// err = file.Close() -// check(err) -// fmt.Printf("%d bytes read\n", n) -// -// The io.Reader interface is implemented by *GridFile and may be used to -// deal with it. As an example, the following snippet will dump the whole -// file into the standard output: -// -// file, err := db.GridFS("fs").Open("myfile.txt") -// check(err) -// err = io.Copy(os.Stdout, file) -// check(err) -// err = file.Close() -// check(err) -// -func (gfs *GridFS) Open(name string) (file *GridFile, err error) { - var doc gfsFile - err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc) - if err != nil { - return - } - file = gfs.newFile() - file.mode = gfsReading - file.doc = doc - return -} - -// OpenNext opens the next file from iter for reading, sets *file to it, -// and returns true on the success case. If no more documents are available -// on iter or an error occurred, *file is set to nil and the result is false. -// Errors will be available via iter.Err(). -// -// The iter parameter must be an iterator on the GridFS files collection. -// Using the GridFS.Find method is an easy way to obtain such an iterator, -// but any iterator on the collection will work. -// -// If the provided *file is non-nil, OpenNext will close it before attempting -// to iterate to the next element. This means that in a loop one only -// has to worry about closing files when breaking out of the loop early -// (break, return, or panic). -// -// For example: -// -// gfs := db.GridFS("fs") -// query := gfs.Find(nil).Sort("filename") -// iter := query.Iter() -// var f *mgo.GridFile -// for gfs.OpenNext(iter, &f) { -// fmt.Printf("Filename: %s\n", f.Name()) -// } -// if iter.Close() != nil { -// panic(iter.Close()) -// } -// -func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool { - if *file != nil { - // Ignoring the error here shouldn't be a big deal - // as we're reading the file and the loop iteration - // for this file is finished. 
- _ = (*file).Close() - } - var doc gfsFile - if !iter.Next(&doc) { - *file = nil - return false - } - f := gfs.newFile() - f.mode = gfsReading - f.doc = doc - *file = f - return true -} - -// Find runs query on GridFS's files collection and returns -// the resulting Query. -// -// This logic: -// -// gfs := db.GridFS("fs") -// iter := gfs.Find(nil).Iter() -// -// Is equivalent to: -// -// files := db.C("fs" + ".files") -// iter := files.Find(nil).Iter() -// -func (gfs *GridFS) Find(query interface{}) *Query { - return gfs.Files.Find(query) -} - -// RemoveId deletes the file with the provided id from the GridFS. -func (gfs *GridFS) RemoveId(id interface{}) error { - err := gfs.Files.Remove(bson.M{"_id": id}) - if err != nil { - return err - } - _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}}) - return err -} - -type gfsDocId struct { - Id interface{} "_id" -} - -// Remove deletes all files with the provided name from the GridFS. -func (gfs *GridFS) Remove(name string) (err error) { - iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter() - var doc gfsDocId - for iter.Next(&doc) { - if e := gfs.RemoveId(doc.Id); e != nil { - err = e - } - } - if err == nil { - err = iter.Close() - } - return err -} - -func (file *GridFile) assertMode(mode gfsFileMode) { - switch file.mode { - case mode: - return - case gfsWriting: - panic("GridFile is open for writing") - case gfsReading: - panic("GridFile is open for reading") - case gfsClosed: - panic("GridFile is closed") - default: - panic("internal error: missing GridFile mode") - } -} - -// SetChunkSize sets size of saved chunks. Once the file is written to, it -// will be split in blocks of that size and each block saved into an -// independent chunk document. The default chunk size is 255kb. -// -// It is a runtime error to call this function once the file has started -// being written to. -func (file *GridFile) SetChunkSize(bytes int) { - file.assertMode(gfsWriting) - debugf("GridFile %p: setting chunk size to %d", file, bytes) - file.m.Lock() - file.doc.ChunkSize = bytes - file.m.Unlock() -} - -// Id returns the current file Id. -func (file *GridFile) Id() interface{} { - return file.doc.Id -} - -// SetId changes the current file Id. -// -// It is a runtime error to call this function once the file has started -// being written to, or when the file is not open for writing. -func (file *GridFile) SetId(id interface{}) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.Id = id - file.m.Unlock() -} - -// Name returns the optional file name. An empty string will be returned -// in case it is unset. -func (file *GridFile) Name() string { - return file.doc.Filename -} - -// SetName changes the optional file name. An empty string may be used to -// unset it. -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetName(name string) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.Filename = name - file.m.Unlock() -} - -// ContentType returns the optional file content type. An empty string will be -// returned in case it is unset. -func (file *GridFile) ContentType() string { - return file.doc.ContentType -} - -// ContentType changes the optional file content type. An empty string may be -// used to unset it. -// -// It is a runtime error to call this function when the file is not open -// for writing. 
-func (file *GridFile) SetContentType(ctype string) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.ContentType = ctype - file.m.Unlock() -} - -// GetMeta unmarshals the optional "metadata" field associated with the -// file into the result parameter. The meaning of keys under that field -// is user-defined. For example: -// -// result := struct{ INode int }{} -// err = file.GetMeta(&result) -// if err != nil { -// panic(err.String()) -// } -// fmt.Printf("inode: %d\n", result.INode) -// -func (file *GridFile) GetMeta(result interface{}) (err error) { - file.m.Lock() - if file.doc.Metadata != nil { - err = bson.Unmarshal(file.doc.Metadata.Data, result) - } - file.m.Unlock() - return -} - -// SetMeta changes the optional "metadata" field associated with the -// file. The meaning of keys under that field is user-defined. -// For example: -// -// file.SetMeta(bson.M{"inode": inode}) -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetMeta(metadata interface{}) { - file.assertMode(gfsWriting) - data, err := bson.Marshal(metadata) - file.m.Lock() - if err != nil && file.err == nil { - file.err = err - } else { - file.doc.Metadata = &bson.Raw{Data: data} - } - file.m.Unlock() -} - -// Size returns the file size in bytes. -func (file *GridFile) Size() (bytes int64) { - file.m.Lock() - bytes = file.doc.Length - file.m.Unlock() - return -} - -// MD5 returns the file MD5 as a hex-encoded string. -func (file *GridFile) MD5() (md5 string) { - return file.doc.MD5 -} - -// UploadDate returns the file upload time. -func (file *GridFile) UploadDate() time.Time { - return file.doc.UploadDate -} - -// SetUploadDate changes the file upload time. -// -// It is a runtime error to call this function when the file is not open -// for writing. -func (file *GridFile) SetUploadDate(t time.Time) { - file.assertMode(gfsWriting) - file.m.Lock() - file.doc.UploadDate = t - file.m.Unlock() -} - -// Close flushes any pending changes in case the file is being written -// to, waits for any background operations to finish, and closes the file. -// -// It's important to Close files whether they are being written to -// or read from, and to check the err result to ensure the operation -// completed successfully. 
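The Close documentation above is the load-bearing contract of this type: chunk inserts run in the background, so a write that appeared to succeed can still fail at Close. A short sketch of the checking pattern that implies, in the fragment style of the examples above; the db variable and file name are assumed to exist in the caller's scope:

    file, err := db.GridFS("fs").Create("example.bin")
    if err != nil {
        panic(err)
    }
    if _, err = file.Write([]byte("payload")); err != nil {
        file.Close()
        panic(err)
    }
    // Deferred background errors surface here, so this check is not optional.
    if err = file.Close(); err != nil {
        panic(err)
    }
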
-func (file *GridFile) Close() (err error) { - file.m.Lock() - defer file.m.Unlock() - if file.mode == gfsWriting { - if len(file.wbuf) > 0 && file.err == nil { - file.insertChunk(file.wbuf) - file.wbuf = file.wbuf[0:0] - } - file.completeWrite() - } else if file.mode == gfsReading && file.rcache != nil { - file.rcache.wait.Lock() - file.rcache = nil - } - file.mode = gfsClosed - debugf("GridFile %p: closed", file) - return file.err -} - -func (file *GridFile) completeWrite() { - for file.wpending > 0 { - debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) - file.c.Wait() - } - if file.err == nil { - hexsum := hex.EncodeToString(file.wsum.Sum(nil)) - if file.doc.UploadDate.IsZero() { - file.doc.UploadDate = bson.Now() - } - file.doc.MD5 = hexsum - file.err = file.gfs.Files.Insert(file.doc) - } - if file.err != nil { - file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) - } - if file.err == nil { - index := Index{ - Key: []string{"files_id", "n"}, - Unique: true, - } - file.err = file.gfs.Chunks.EnsureIndex(index) - } -} - -// Abort cancels an in-progress write, preventing the file from being -// automatically created and ensuring previously written chunks are -// removed when the file is closed. - -// -// It is a runtime error to call Abort when the file was not opened -// for writing. -func (file *GridFile) Abort() { - if file.mode != gfsWriting { - panic("file.Abort must be called on file opened for writing") - } - file.err = errors.New("write aborted") -} - -// Write writes the provided data to the file and returns the -// number of bytes written and an error in case something -// wrong happened. - -// -// The file will internally cache the data so that all but the last -// chunk sent to the database have the size defined by SetChunkSize. -// This also means that errors may be deferred until a future call -// to Write or Close. - -// -// The parameters and behavior of this function turn the file -// into an io.Writer. -func (file *GridFile) Write(data []byte) (n int, err error) { - file.assertMode(gfsWriting) - file.m.Lock() - debugf("GridFile %p: writing %d bytes", file, len(data)) - defer file.m.Unlock() - - if file.err != nil { - return 0, file.err - } - - n = len(data) - file.doc.Length += int64(n) - chunkSize := file.doc.ChunkSize - - if len(file.wbuf)+len(data) < chunkSize { - file.wbuf = append(file.wbuf, data...) - return - } - - // First, flush file.wbuf complementing with data. - if len(file.wbuf) > 0 { - missing := chunkSize - len(file.wbuf) - if missing > len(data) { - missing = len(data) - } - file.wbuf = append(file.wbuf, data[:missing]...) - data = data[missing:] - file.insertChunk(file.wbuf) - file.wbuf = file.wbuf[0:0] - } - - // Then, flush all chunks from data without copying. - for len(data) > chunkSize { - size := chunkSize - if size > len(data) { - size = len(data) - } - file.insertChunk(data[:size]) - data = data[size:] - } - - // And append the rest for a future call. - file.wbuf = append(file.wbuf, data...) - - return n, file.err -} - -func (file *GridFile) insertChunk(data []byte) { - n := file.chunk - file.chunk++ - debugf("GridFile %p: adding to checksum: %q", file, string(data)) - file.wsum.Write(data) - - for file.doc.ChunkSize*file.wpending >= 1024*1024 { - // Hold on.. we got a MB pending.
- file.c.Wait() - if file.err != nil { - return - } - } - - file.wpending++ - - debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data)) - - // We may not own the memory of data, so rather than - // simply copying it, we'll marshal the document ahead of time. - data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data}) - if err != nil { - file.err = err - return - } - - go func() { - err := file.gfs.Chunks.Insert(bson.Raw{Data: data}) - file.m.Lock() - file.wpending-- - if err != nil && file.err == nil { - file.err = err - } - file.c.Broadcast() - file.m.Unlock() - }() -} - -// Seek sets the offset for the next Read or Write on file to -// offset, interpreted according to whence: 0 means relative to -// the origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. It returns the new offset and -// an error, if any. -func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { - file.m.Lock() - debugf("GridFile %p: seeking for %d (whence=%d)", file, offset, whence) - defer file.m.Unlock() - switch whence { - case os.SEEK_SET: - case os.SEEK_CUR: - offset += file.offset - case os.SEEK_END: - offset += file.doc.Length - default: - panic("unsupported whence value") - } - if offset > file.doc.Length { - return file.offset, errors.New("seek past end of file") - } - if offset == file.doc.Length { - // If we're seeking to the end of the file, - // no need to read anything. This enables - // a client to find the size of the file using only the - // io.ReadSeeker interface with low overhead. - file.offset = offset - return file.offset, nil - } - chunk := int(offset / int64(file.doc.ChunkSize)) - if chunk+1 == file.chunk && offset >= file.offset { - file.rbuf = file.rbuf[int(offset-file.offset):] - file.offset = offset - return file.offset, nil - } - file.offset = offset - file.chunk = chunk - file.rbuf = nil - file.rbuf, err = file.getChunk() - if err == nil { - file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):] - } - return file.offset, err -} - -// Read reads into b the next available data from the file and -// returns the number of bytes read and an error in case -// something wrong happened. At the end of the file, n will -// be zero and err will be set to io.EOF. - -// -// The parameters and behavior of this function turn the file -// into an io.Reader. -func (file *GridFile) Read(b []byte) (n int, err error) { - file.assertMode(gfsReading) - file.m.Lock() - debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b)) - defer file.m.Unlock() - if file.offset == file.doc.Length { - return 0, io.EOF - } - for err == nil { - i := copy(b, file.rbuf) - n += i - file.offset += int64(i) - file.rbuf = file.rbuf[i:] - if i == len(b) || file.offset == file.doc.Length { - break - } - b = b[i:] - file.rbuf, err = file.getChunk() - } - return n, err -} - -func (file *GridFile) getChunk() (data []byte, err error) { - cache := file.rcache - file.rcache = nil - if cache != nil && cache.n == file.chunk { - debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk) - cache.wait.Lock() - data, err = cache.data, cache.err - } else { - debugf("GridFile %p: Fetching chunk %d", file, file.chunk) - var doc gfsChunk - err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc) - data = doc.Data - } - file.chunk++ - if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length { - // Read the next one in background.
- cache = &gfsCachedChunk{n: file.chunk} - cache.wait.Lock() - debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk) - // Clone the session to avoid having it closed in between. - chunks := file.gfs.Chunks - session := chunks.Database.Session.Clone() - go func(id interface{}, n int) { - defer session.Close() - chunks = chunks.With(session) - var doc gfsChunk - cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc) - cache.data = doc.Data - cache.wait.Unlock() - }(file.doc.Id, file.chunk) - file.rcache = cache - } - debugf("Returning err: %#v", err) - return -} diff --git a/vendor/gopkg.in/mgo.v2/internal/json/LICENSE b/vendor/gopkg.in/mgo.v2/internal/json/LICENSE deleted file mode 100644 index 7448756..0000000 --- a/vendor/gopkg.in/mgo.v2/internal/json/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/mgo.v2/internal/json/decode.go b/vendor/gopkg.in/mgo.v2/internal/json/decode.go deleted file mode 100644 index ce7c7d2..0000000 --- a/vendor/gopkg.in/mgo.v2/internal/json/decode.go +++ /dev/null @@ -1,1685 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. 
Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// Unmarshal will only set exported fields of the struct. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice length -// to zero and then appends each element to the slice. -// As a special case, to unmarshal an empty JSON array into a slice, -// Unmarshal replaces the slice with a new empty slice. -// -// To unmarshal a JSON array into a Go array, Unmarshal decodes -// JSON array elements into corresponding Go array elements. -// If the Go array is smaller than the JSON array, -// the additional JSON array elements are discarded. -// If the JSON array is smaller than the Go array, -// the additional Go array elements are set to zero values. -// -// To unmarshal a JSON object into a map, Unmarshal first establishes a map to -// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal -// reuses the existing map, keeping existing entries. Unmarshal then stores key- -// value pairs from the JSON object into the map. The map's key type must -// either be a string or implement encoding.TextUnmarshaler. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshaling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by types -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type.
-type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// isValidNumber reports whether s is a valid JSON number literal. -func isValidNumber(s string) bool { - // This function implements the JSON numbers grammar. - // See https://tools.ietf.org/html/rfc7159#section-6 - // and http://json.org/number.gif - - if s == "" { - return false - } - - // Optional - - if s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - - // Digits - switch { - default: - return false - - case s[0] == '0': - s = s[1:] - - case '1' <= s[0] && s[0] <= '9': - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // . followed by 1 or more digits. - if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // e or E followed by an optional - or + and - // 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - s = s[1:] - if s[0] == '+' || s[0] == '-' { - s = s[1:] - if s == "" { - return false - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - } - } - - // Make sure we are at the end. - return s == "" -} - -// decodeState represents the state while decoding a JSON value. 
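The Number type and isValidNumber above track the standard library's json.Number, since this package is a fork of encoding/json. A self-contained sketch using the stdlib equivalent, so it runs without this internal package:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Number keeps the literal text, deferring the lossy conversion
        // until the caller picks Float64 or Int64.
        f, _ := json.Number("3.14").Float64()
        i, _ := json.Number("42").Int64()
        _, err := json.Number("42").Float64() // an integer literal is also a valid float
        fmt.Println(f, i, err)                // 3.14 42 <nil>
    }
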
-type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool - ext Extension -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else if c == '[' { - d.scan.step(&d.scan, ']') - } else { - // Was inside a function name. Get out of it. - d.scan.step(&d.scan, '(') - d.scan.step(&d.scan, ')') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := d.data[d.off] - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - - case scanBeginName: - d.name(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. 
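valueQuoted, documented above, exists to support the `,string` struct-tag option: the decoder must peel a JSON string off the wire before decoding the value inside it. A standard-library sketch of what that option means to callers; the type and payload are invented:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Balance arrives as a JSON string ("1024") but decodes as an int64;
    // the ,string option routes it through the quoted-literal path.
    type Account struct {
        Balance int64 `json:"balance,string"`
    }

    func main() {
        var a Account
        err := json.Unmarshal([]byte(`{"balance": "1024"}`), &a)
        fmt.Println(a.Balance, err) // 1024 <nil>
    }
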
-func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginName: - switch v := d.nameInterface().(type) { - case nil, string: - return v - } - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, v - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, v - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. 
- op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if d.storeKeyed(pv) { - return - } - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: - // struct or - // map[string]T or map[encoding.TextUnmarshaler]T - switch v.Kind() { - case reflect.Map: - // Map key must either have string kind or be an encoding.TextUnmarshaler. - t := v.Type() - if t.Key().Kind() != reflect.String && - !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - empty := true - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - if !empty && !d.ext.trailingCommas { - d.syntaxError("beginning of object key string") - } - break - } - empty = false - if op == scanBeginName { - if !d.ext.unquotedKeys { - d.syntaxError("beginning of object key string") - } - } else if op != scanBeginLiteral { - d.error(errPhase) - } - unquotedKey := op == scanBeginName - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - var key []byte - if unquotedKey { - key = item - // TODO Fix code below to quote item when necessary. - } else { - var ok bool - key, ok = unquoteBytes(item) - if !ok { - d.error(errPhase) - } - } - - // Figure out field corresponding to key. 
- var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kt := v.Type().Key() - var kv reflect.Value - switch { - case kt.Kind() == reflect.String: - kv = reflect.ValueOf(key).Convert(v.Type().Key()) - case reflect.PtrTo(kt).Implements(textUnmarshalerType): - kv = reflect.New(v.Type().Key()) - d.literalStore(item, kv, true) - kv = kv.Elem() - default: - panic("json: Unexpected key type") // should never occur - } - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// isNull returns whether there's a null literal at the provided offset. -func (d *decodeState) isNull(off int) bool { - if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' { - return false - } - d.nextscan.reset() - for i, c := range d.data[off:] { - if i > 4 { - return false - } - switch d.nextscan.step(&d.nextscan, c) { - case scanContinue, scanBeginName: - continue - } - break - } - return true -} - -// name consumes a const or function from d.data[d.off-1:], decoding into the value v. -// the first byte of the function name has been read already. -func (d *decodeState) name(v reflect.Value) { - if d.isNull(d.off-1) { - d.literal(v) - return - } - - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if d.storeKeyed(pv) { - return - } - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over function in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - out := d.nameInterface() - if out == nil { - v.Set(reflect.Zero(v.Type())) - } else { - v.Set(reflect.ValueOf(out)) - } - return - } - - nameStart := d.off - 1 - - op := d.scanWhile(scanContinue) - - name := d.data[nameStart : d.off-1] - if op != scanParam { - // Back up so the byte just read is consumed next. 
- d.off-- - d.scan.undo(op) - if l, ok := d.convertLiteral(name); ok { - d.storeValue(v, l) - return - } - d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) - } - - funcName := string(name) - funcData := d.ext.funcs[funcName] - if funcData.key == "" { - d.error(fmt.Errorf("json: unknown function %q", funcName)) - } - - // Check type of target: - // struct or - // map[string]T or map[encoding.TextUnmarshaler]T - switch v.Kind() { - case reflect.Map: - // Map key must either have string kind or be an encoding.TextUnmarshaler. - t := v.Type() - if t.Key().Kind() != reflect.String && - !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - // TODO Fix case of func field as map. - //topv := v - - // Figure out field corresponding to function. - key := []byte(funcData.key) - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - v = reflect.New(elemType).Elem() - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - for _, i := range f.index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = v.Field(i) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - } - } - - // Check for unmarshaler on func field itself. - u, ut, pv = d.indirect(v, false) - if u != nil { - d.off = nameStart - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - - var mapElem reflect.Value - - // Parse function arguments. - for i := 0; ; i++ { - // closing ) - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndParams { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - if i >= len(funcData.args) { - d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) - } - key := []byte(funcData.args[i]) - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read value. 
- if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kt := v.Type().Key() - var kv reflect.Value - switch { - case kt.Kind() == reflect.String: - kv = reflect.ValueOf(key).Convert(v.Type().Key()) - case reflect.PtrTo(kt).Implements(textUnmarshalerType): - kv = reflect.New(v.Type().Key()) - d.literalStore(key, kv, true) - kv = kv.Elem() - default: - panic("json: Unexpected key type") // should never occur - } - v.SetMapIndex(kv, subv) - } - - // Next token must be , or ). - op = d.scanWhile(scanSkipSpace) - if op == scanEndParams { - break - } - if op != scanParam { - d.error(errPhase) - } - } -} - -// keyed attempts to decode an object or function using a keyed doc extension, -// and returns the value and true on success, or nil and false otherwise. -func (d *decodeState) keyed() (interface{}, bool) { - if len(d.ext.keyed) == 0 { - return nil, false - } - - unquote := false - - // Look-ahead first key to check for a keyed document extension. - d.nextscan.reset() - var start, end int - for i, c := range d.data[d.off-1:] { - switch op := d.nextscan.step(&d.nextscan, c); op { - case scanSkipSpace, scanContinue, scanBeginObject: - continue - case scanBeginLiteral, scanBeginName: - unquote = op == scanBeginLiteral - start = i - continue - } - end = i - break - } - - name := d.data[d.off-1+start : d.off-1+end] - - var key []byte - var ok bool - if unquote { - key, ok = unquoteBytes(name) - if !ok { - d.error(errPhase) - } - } else { - funcData, ok := d.ext.funcs[string(name)] - if !ok { - return nil, false - } - key = []byte(funcData.key) - } - - decode, ok := d.ext.keyed[string(key)] - if !ok { - return nil, false - } - - d.off-- - out, err := decode(d.next()) - if err != nil { - d.error(err) - } - return out, true -} - -func (d *decodeState) storeKeyed(v reflect.Value) bool { - keyed, ok := d.keyed() - if !ok { - return false - } - d.storeValue(v, keyed) - return true -} - -var ( - trueBytes = []byte("true") - falseBytes = []byte("false") - nullBytes = []byte("null") -) - -func (d *decodeState) storeValue(v reflect.Value, from interface{}) { - switch from { - case nil: - d.literalStore(nullBytes, v, false) - return - case true: - d.literalStore(trueBytes, v, false) - return - case false: - d.literalStore(falseBytes, v, false) - return - } - fromv := reflect.ValueOf(from) - for fromv.Kind() == reflect.Ptr && !fromv.IsNil() { - fromv = fromv.Elem() - } - fromt := fromv.Type() - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - vt := v.Type() - if fromt.AssignableTo(vt) { - v.Set(fromv) - } else if fromt.ConvertibleTo(vt) { - v.Set(fromv.Convert(vt)) - } else { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - } -} - -func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) { - if len(name) == 0 { - return nil, false - } - switch name[0] { - case 't': - if bytes.Equal(name, trueBytes) { - return true, true - } - case 'f': - if bytes.Equal(name, falseBytes) { - return false, true - } - case 'n': - if bytes.Equal(name, nullBytes) { - return nil, true - } - } - if l, ok := d.ext.consts[string(name)]; ok { - return l, true - } - return nil, false -} - 
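Aside: the convertLiteral helper deleted just above resolves a bare JSON name (the built-in true, false and null literals, plus any constants registered through the decoder's Extension) to a Go value. Below is a minimal standalone sketch of that dispatch, not code from the deleted file; the constant table is a hypothetical stand-in for d.ext.consts, and the real implementation also switches on the first byte before comparing.

package main

import (
	"bytes"
	"fmt"
)

// consts stands in for the extension's registered constants (d.ext.consts);
// the entry here is made up purely for illustration.
var consts = map[string]interface{}{
	"undefined": nil,
}

// convertLiteral mirrors the lookup order used by the decoder above:
// built-in JSON literals first, then the extension's constant table.
func convertLiteral(name []byte) (interface{}, bool) {
	switch {
	case bytes.Equal(name, []byte("true")):
		return true, true
	case bytes.Equal(name, []byte("false")):
		return false, true
	case bytes.Equal(name, []byte("null")):
		return nil, true
	}
	if v, ok := consts[string(name)]; ok {
		return v, true
	}
	return nil, false
}

func main() {
	for _, n := range []string{"true", "undefined", "bogus"} {
		v, ok := convertLiteral([]byte(n))
		fmt.Printf("%s -> %v (known=%v)\n", n, v, ok)
	}
}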
-// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) 
- break - } - v.SetBytes(b[:n]) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - if !isValidNumber(s) { - d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) - } - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - case scanBeginName: - return d.nameInterface() - } -} - -func (d *decodeState) syntaxError(expected string) { - msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected) - d.error(&SyntaxError{msg, int64(d.off)}) -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - if len(v) > 0 && !d.ext.trailingCommas { - d.syntaxError("beginning of value") - } - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. 
-func (d *decodeState) objectInterface() interface{} { - v, ok := d.keyed() - if ok { - return v - } - - m := make(map[string]interface{}) - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - if len(m) > 0 && !d.ext.trailingCommas { - d.syntaxError("beginning of object key string") - } - break - } - if op == scanBeginName { - if !d.ext.unquotedKeys { - d.syntaxError("beginning of object key string") - } - } else if op != scanBeginLiteral { - d.error(errPhase) - } - unquotedKey := op == scanBeginName - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - var key string - if unquotedKey { - key = string(item) - } else { - var ok bool - key, ok = unquote(item) - if !ok { - d.error(errPhase) - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// nameInterface is like function but returns map[string]interface{}. -func (d *decodeState) nameInterface() interface{} { - v, ok := d.keyed() - if ok { - return v - } - - nameStart := d.off - 1 - - op := d.scanWhile(scanContinue) - - name := d.data[nameStart : d.off-1] - if op != scanParam { - // Back up so the byte just read is consumed next. - d.off-- - d.scan.undo(op) - if l, ok := d.convertLiteral(name); ok { - return l - } - d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) - } - - funcName := string(name) - funcData := d.ext.funcs[funcName] - if funcData.key == "" { - d.error(fmt.Errorf("json: unknown function %q", funcName)) - } - - m := make(map[string]interface{}) - for i := 0; ; i++ { - // Look ahead for ) - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndParams { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - if i >= len(funcData.args) { - d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) - } - m[funcData.args[i]] = d.valueInterface() - - // Next token must be , or ). - op = d.scanWhile(scanSkipSpace) - if op == scanEndParams { - break - } - if op != scanParam { - d.error(errPhase) - } - } - return map[string]interface{}{funcData.key: m} -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. 
-func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/gopkg.in/mgo.v2/internal/json/encode.go b/vendor/gopkg.in/mgo.v2/internal/json/encode.go deleted file mode 100644 index 67a0f00..0000000 --- a/vendor/gopkg.in/mgo.v2/internal/json/encode.go +++ /dev/null @@ -1,1256 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON as defined in -// RFC 4627. The mapping between JSON and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "fmt" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. 
-// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. If no MarshalJSON method is present but the -// value implements encoding.TextMarshaler instead, Marshal calls -// its MarshalText method. -// The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. -// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// This escaping can be disabled using an Encoder with DisableHTMLEscaping. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON value. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. -// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. 
If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. The map's key type must either be a string -// or implement encoding.TextMarshaler. The map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON value. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON value. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. -// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v, encOpts{escapeHTML: true}) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML
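Aside: the Marshal documentation deleted above describes the "json" struct-tag options (key renaming, omitempty, "-", and ",string"). The sketch below exercises those options using the standard library's encoding/json rather than the vendored fork; for the options shown the fork mirrors the standard behavior, so the output comment should hold, but treat it as illustrative only.

package main

import (
	"encoding/json" // stand-in for the vendored fork, which extends this API
	"fmt"
)

type User struct {
	Name   string `json:"myName"`     // appears under the key "myName"
	Email  string `json:",omitempty"` // default key; omitted when empty
	Secret string `json:"-"`          // never encoded
	Count  int64  `json:",string"`    // encoded inside a JSON string
}

func main() {
	b, err := json.Marshal(User{Name: "ian", Count: 42})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"myName":"ian","Count":"42"}
}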